| column | dtype | lengths / values |
|---|---|---|
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 3–616 |
| content_id | string | lengths 40–40 |
| detected_licenses | list | lengths 0–112 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5–115 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M (nullable ⌀) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 (nullable ⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 (nullable ⌀) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | lengths 3 – 10.2M |
| authors | list | lengths 1–1 |
| author_id | string | lengths 1–132 |
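Each record below is one row of this schema, with fields separated by `|`. As a minimal sketch of streaming rows with this schema via the Hugging Face `datasets` library (the dataset identifier is a placeholder; this dump does not name its source):

from datasets import load_dataset

DATASET_ID = "some-org/some-code-dataset"  # hypothetical: not named by this dump

ds = load_dataset(DATASET_ID, split="train", streaming=True)
record = next(iter(ds))  # each record carries repo/file metadata plus the file content
print(record["repo_name"], record["path"], record["length_bytes"])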
ca30c065fa064d98cd699f2fce97525a36130d24
|
a1119965e2e3bdc40126fd92f4b4b8ee7016dfca
|
/trunk/repy/tests/ut_repytests_encodingcommentisignored.py
|
6158b4dcedcbf4dd34764b31884788af8eca2271
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
SeattleTestbed/attic
|
0e33211ddf39efdbcf5573d4fc7fa5201aa7310d
|
f618a962ce2fd3c4838564e8c62c10924f5df45f
|
refs/heads/master
| 2021-06-10T23:10:47.792847 | 2017-05-15T12:05:43 | 2017-05-15T12:05:43 | 20,154,061 | 0 | 1 | null | 2014-10-16T17:21:06 | 2014-05-25T12:34:00 |
Python
|
UTF-8
|
Python
| false | false | 179 |
py
|
#!/usr/bin/env python
# -*- coding: rot13 -*-
#pragma repy
#pragma error NameError
# this will raise a NameError if this isn't rot13... (cevag == print)
cevag('hello world')
|
[
"USER@DOMAIN"
] |
USER@DOMAIN
|
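Why the test above passes: under Python 2 the `# -*- coding: rot13 -*-` header made the interpreter rot13-decode the whole source before parsing, so `cevag` becomes `print`; if the encoding comment were ignored, `cevag` would stay undefined and raise the NameError the pragma expects. A quick check with the standard `codecs` module:

import codecs
assert codecs.decode('cevag', 'rot13') == 'print'
print(codecs.decode("cevag('hello world')", 'rot13'))  # print('hello world')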
866ff7508fcdecdbbeb77624b1ce0bde394c5c83
|
1b78a071c2134beafc265b839ba8acba63142be2
|
/intersight/models/os_windows_parameters.py
|
f22edebf6c628538017f34e5c2fad86260c944c0
|
[
"Apache-2.0"
] |
permissive
|
dyoshiha/intersight-python
|
59c2ed3f751726a1d7c0e4254f1203e6546f1d47
|
01d1abcf8a9dcee0fe9150cdec70eb39d76ca290
|
refs/heads/master
| 2020-12-30T07:32:16.452334 | 2020-02-03T21:32:36 | 2020-02-03T21:32:36 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,942 |
py
|
# coding: utf-8
"""
Cisco Intersight OpenAPI specification.
The Cisco Intersight OpenAPI specification.
OpenAPI spec version: 1.0.9-1295
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class OsWindowsParameters(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'object_type': 'str',
'edition': 'str'
}
attribute_map = {
'object_type': 'ObjectType',
'edition': 'Edition'
}
def __init__(self, object_type=None, edition='Standard'):
"""
OsWindowsParameters - a model defined in Swagger
"""
self._object_type = None
self._edition = None
if object_type is not None:
self.object_type = object_type
if edition is not None:
self.edition = edition
@property
def object_type(self):
"""
Gets the object_type of this OsWindowsParameters.
The concrete type of this complex type. The ObjectType property must be set explicitly by API clients when the type is ambiguous. In all other cases, the ObjectType is optional. The type is ambiguous when a managed object contains an array of nested documents, and the documents in the array are heterogeneous, i.e. the array can contain nested documents of different types.
:return: The object_type of this OsWindowsParameters.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this OsWindowsParameters.
The concrete type of this complex type. The ObjectType property must be set explicitly by API clients when the type is ambiguous. In all other cases, the ObjectType is optional. The type is ambiguous when a managed object contains an array of nested documents, and the documents in the array are heterogeneous, i.e. the array can contain nested documents of different types.
:param object_type: The object_type of this OsWindowsParameters.
:type: str
"""
self._object_type = object_type
@property
def edition(self):
"""
Gets the edition of this OsWindowsParameters.
Lists all the editions supported for Windows Server installation.
:return: The edition of this OsWindowsParameters.
:rtype: str
"""
return self._edition
@edition.setter
def edition(self, edition):
"""
Sets the edition of this OsWindowsParameters.
Lists all the editions supported for Windows Server installation.
:param edition: The edition of this OsWindowsParameters.
:type: str
"""
allowed_values = ["Standard", "StandardCore", "Datacenter", "DatacenterCore"]
if edition not in allowed_values:
raise ValueError(
"Invalid value for `edition` ({0}), must be one of {1}"
.format(edition, allowed_values)
)
self._edition = edition
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, OsWindowsParameters):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
[
"[email protected]"
] | |
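A small usage sketch of the generated model above; the `object_type` value passed here is an illustrative assumption:

params = OsWindowsParameters(object_type='os.WindowsParameters', edition='Datacenter')
print(params.to_dict())  # {'object_type': 'os.WindowsParameters', 'edition': 'Datacenter'}
try:
    params.edition = 'Home'  # rejected by the setter: not in allowed_values
except ValueError as exc:
    print(exc)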
1412a681a3399d9ea069e81d3912099989b16321
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/YRwZvg5Pkgw4pEWC5_11.py
|
aecb9fbb1c52ca98444155f4ac40055298714c4d
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 183 |
py
|
def flick_switch(lst):
a=True
c=[]
for i in range(len(lst)):
if lst[i]!="flick":
c.append(a)
if lst[i]=="flick":
a=not a
c.append(a)
return c
|
[
"[email protected]"
] | |
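A quick check of the snippet above: the state starts at True, every "flick" toggles it before being recorded, and any other word records the current state unchanged:

print(flick_switch(["codewars", "flick", "code", "wars"]))  # [True, False, False, False]
print(flick_switch(["flick"]))  # [False]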
262e4ad5a1421368dfd879c840aabfff3ff1235f
|
0fccee4c738449f5e0a8f52ea5acabf51db0e910
|
/genfragments/EightTeV/PYTHIA6/PYTHIA6_Tauola_SM_H_2tau_zh_mH140_lepdecay_8TeV_cff.py
|
82691bffb6acd1a22174cf7f3d23d9a9bfb00d7a
|
[] |
no_license
|
cms-sw/genproductions
|
f308ffaf3586c19b29853db40e6d662e937940ff
|
dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4
|
refs/heads/master
| 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 |
Python
|
UTF-8
|
Python
| false | false | 4,537 |
py
|
import FWCore.ParameterSet.Config as cms
source = cms.Source("EmptySource")
from Configuration.Generator.PythiaUEZ2starSettings_cfi import *
from GeneratorInterface.ExternalDecays.TauolaSettings_cff import *
generator = cms.EDFilter("Pythia6GeneratorFilter",
pythiaPylistVerbosity = cms.untracked.int32(1),
# put here the efficiency of your filter (1. if no filter)
filterEfficiency = cms.untracked.double(1.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
# put here the cross section of your process (in pb)
crossSection = cms.untracked.double(1.0),
maxEventsToPrint = cms.untracked.int32(1),
comEnergy = cms.double(8000.0),
ExternalDecays = cms.PSet(
Tauola = cms.untracked.PSet(
TauolaPolar,
TauolaDefaultInputCards
),
parameterSets = cms.vstring('Tauola')
),
PythiaParameters = cms.PSet(
pythiaUESettingsBlock,
processParameters = cms.vstring('PMAS(25,1)=140.0 !mass of Higgs',
'MSEL=0 ! user selection for process',
'MSUB(102)=0 !ggH',
'MSUB(123)=0 !ZZ fusion to H',
'MSUB(124)=0 !WW fusion to H',
'MSUB(24)=1 !ZH production',
'MSUB(26)=0 !WH production',
'MSUB(121)=0 !gg to ttH',
'MSUB(122)=0 !qq to ttH',
'MDME(174,1)=0 !Z decay into d dbar',
'MDME(175,1)=0 !Z decay into u ubar',
'MDME(176,1)=0 !Z decay into s sbar',
'MDME(177,1)=0 !Z decay into c cbar',
'MDME(178,1)=0 !Z decay into b bbar',
'MDME(179,1)=0 !Z decay into t tbar',
'MDME(182,1)=1 !Z decay into e- e+',
'MDME(183,1)=0 !Z decay into nu_e nu_ebar',
'MDME(184,1)=1 !Z decay into mu- mu+',
'MDME(185,1)=0 !Z decay into nu_mu nu_mubar',
'MDME(186,1)=1 !Z decay into tau- tau+',
'MDME(187,1)=0 !Z decay into nu_tau nu_taubar',
'MDME(190,1)=0 ! W decay into dbar u',
'MDME(191,1)=0 ! W decay into dbar c',
'MDME(192,1)=0 ! W decay into dbar t',
'MDME(194,1)=0 ! W decay into sbar u',
'MDME(195,1)=0 ! W decay into sbar c',
'MDME(196,1)=0 ! W decay into sbar t',
'MDME(198,1)=0 ! W decay into bbar u',
'MDME(199,1)=0 ! W decay into bbar c',
'MDME(200,1)=0 ! W decay into bbar t',
'MDME(206,1)=0 ! W decay into e+ nu_e',
'MDME(207,1)=0 ! W decay into mu+ nu_mu',
'MDME(208,1)=0 ! W decay into tau+ nu_tau',
'MDME(210,1)=0 !Higgs decay into dd',
'MDME(211,1)=0 !Higgs decay into uu',
'MDME(212,1)=0 !Higgs decay into ss',
'MDME(213,1)=0 !Higgs decay into cc',
'MDME(214,1)=0 !Higgs decay into bb',
'MDME(215,1)=0 !Higgs decay into tt',
'MDME(216,1)=0 !Higgs decay into',
'MDME(217,1)=0 !Higgs decay into Higgs decay',
'MDME(218,1)=0 !Higgs decay into e nu e',
'MDME(219,1)=0 !Higgs decay into mu nu mu',
'MDME(220,1)=1 !Higgs decay into tau nu tau',
'MDME(221,1)=0 !Higgs decay into Higgs decay',
'MDME(222,1)=0 !Higgs decay into g g',
'MDME(223,1)=0 !Higgs decay into gam gam',
'MDME(224,1)=0 !Higgs decay into gam Z',
'MDME(225,1)=0 !Higgs decay into Z Z',
'MDME(226,1)=0 !Higgs decay into W W'
),
# This is a vector of ParameterSet names to be read, in this order
parameterSets = cms.vstring('pythiaUESettings',
'processParameters')
)
)
configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.1 $'),
name = cms.untracked.string('$Source: /local/reps/CMSSW/CMSSW/Configuration/GenProduction/python/EightTeV/PYTHIA6_Tauola_SM_H_2tau_wh_zh_tth_mH1140_lepdecay_8TeV_cff.py,v $'),
annotation = cms.untracked.string('PYTHIA6 WH/ZH/ttH, H->tautau mH=140GeV with TAUOLA at 8TeV')
)
ProductionFilterSequence = cms.Sequence(generator)
|
[
"[email protected]"
] | |
c087d32f2facd8e4c3b24b1fd7edf2e63c4c486e
|
e28009b0a4584e8d128ed6fbd4ba84a1db11d1b9
|
/1.Two Sum/Two Sum.py
|
49443b25034a21dc94c2086ac0cca41d3b957794
|
[] |
no_license
|
jerrylance/LeetCode
|
509d16e4285296167feb51a80d6c382b3833405e
|
06ed3e9b27a3f1c0c517710d57fbbd794fd83e45
|
refs/heads/master
| 2020-12-02T23:10:27.382142 | 2020-08-02T02:03:54 | 2020-08-02T02:03:54 | 231,141,551 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,575 |
py
|
# LeetCode Solution
# Zeyu Liu
# 2019.12.31
# 1.Two Sum
# method 1
# brute force
from typing import List
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
renums = []
n = len(nums)
for i in range(n):
for j in range(i+1,n):
if nums[i]+nums[j] == target:
renums.append(i)
renums.append(j)
return(renums)
# call the method
solve = Solution()
print(solve.twoSum([2,7,11,15],9))
# method 2
# hash map (the better approach)
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
dictory = {}
for i, num in enumerate(nums):
if num in dictory:
return[dictory[num], i]
else:
dictory[target - num] = i
# enumerate() yields (index, value) pairs in ascending index order; here they are used to build the complement-lookup dict
# fastest of the three methods
# call the method
solve = Solution()
print(solve.twoSum([2,7,11,15],9))
# method 3
# slicing
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
n = len(nums)
for i in range(n):
if target - nums[i] in nums[i+1:]:
return [i, nums.index(target - nums[i],i+1)]
# the i+1 passed to index() in the return is its start argument, so the search begins at the element after i; without it, duplicate values such as nums=(3,3) with target 6 would return [0,0] instead of [0,1]
# slicing keeps memory usage fairly low
# call the method
solve = Solution()
print(solve.twoSum([2,7,11,15,-2],9))
|
[
"[email protected]"
] | |
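A worked trace of the hash-map method (method 2) above for nums=[2, 7, 11, 15], target=9:

# i=0, num=2: 2 not in dictory -> store the needed complement: dictory = {7: 0}
# i=1, num=7: 7 is in dictory  -> return [dictory[7], 1] == [0, 1]
# all three methods print [0, 1] for this input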
979baf5b9d39a2a3c69640d35c34d92478815b1f
|
6dfc23ef65e5943712340ef2b4b648cc25ea1fad
|
/2018/04/12/Creating a Weather App in Django Using Python Requests [Part 1]/weather_app_django/the_weather/weather/views.py
|
7a14b9b7253bd4e4585a206e82820683967d15e3
|
[
"Unlicense"
] |
permissive
|
PrettyPrinted/youtube_video_code
|
6d265c910de18d780cdb99f7ea11b8b963929dc2
|
5654e5feba854d3b41b8dd75218e0221408e7831
|
refs/heads/master
| 2023-09-04T21:28:57.386174 | 2023-08-11T07:07:45 | 2023-08-11T07:07:45 | 186,743,986 | 698 | 2,347 |
Unlicense
| 2022-10-06T04:06:56 | 2019-05-15T03:40:45 |
HTML
|
UTF-8
|
Python
| false | false | 857 |
py
|
import requests
from django.shortcuts import render
from .models import City
from .forms import CityForm
def index(request):
url = 'http://api.openweathermap.org/data/2.5/weather?q={}&units=imperial&appid=YOUR_API_KEY'
if request.method == 'POST':
form = CityForm(request.POST)
form.save()
form = CityForm()
cities = City.objects.all()
weather_data = []
for city in cities:
r = requests.get(url.format(city)).json()
city_weather = {
'city' : city.name,
'temperature' : r['main']['temp'],
'description' : r['weather'][0]['description'],
'icon' : r['weather'][0]['icon'],
}
weather_data.append(city_weather)
context = {'weather_data' : weather_data, 'form' : form}
return render(request, 'weather/weather.html', context)
|
[
"[email protected]"
] | |
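The view above formats the `City` object straight into the request URL, so the model must render as its name; a minimal sketch of what `models.py` presumably contains (an assumption, since that file is not shown here):

from django.db import models

class City(models.Model):
    name = models.CharField(max_length=25)

    def __str__(self):
        return self.name  # lets url.format(city) insert the city name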
cecca4dec78b16568a5dd8a9f07ecf906268784e
|
49a93012ce18b72abdb85aae1af09504fa039b6c
|
/20년 2월/1479.py
|
bcaca841af25e0c185a53b6c16c59829d9464ae7
|
[] |
no_license
|
JinleeJeong/Algorithm
|
ca3e755a29537f8d82ef770f174fd055242dd708
|
a81257d7e4a54a00ac2c9a1dd324cc7eeb765240
|
refs/heads/master
| 2020-12-08T13:26:27.917450 | 2020-05-12T07:16:08 | 2020-05-12T07:16:08 | 232,992,516 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 514 |
py
|
n, m = map(int, input().split())
matrix = [[0]*m for i in range(n)]
count = 0
# i runs from 0 to 3
for i in range(0, n+m-1): # 0 1 2 3; n+m-1 because with 2 rows and 3 columns the anti-diagonal index j+k must equal i when the diagonals are laid out!!
for j in range(0, m): # 0 1 2
for k in range(0, n): # 0 1
if j+k == i:
count += 1
matrix[k][j] = count
for i in range(0, n):
for j in range(m-1, -1, -1):
print(matrix[i][j], end=' ')
print()
|
[
"[email protected]"
] | |
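A worked example of the diagonal fill above for input "2 3": the anti-diagonals j+k = 0, 1, 2, 3 are numbered in order, then each row is printed right to left:

# matrix after the fill (n=2 rows, m=3 columns):
#   [[1, 3, 5],
#    [2, 4, 6]]
# printed output:
#   5 3 1
#   6 4 2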
8e0c705abcc33c690cb87bdeaeccab37c03a3755
|
52b43ba9fdba64b9a82f8042ebb19190d811b6de
|
/ck/incubator/cbench/setup.py
|
42dbabf39607cae66eddc9c4792a66cbb0107282
|
[
"Apache-2.0",
"CC-BY-SA-3.0",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
cknowledge/ck
|
4f18f6dd6c6278a8942d7288ae5c68007da190fa
|
ffcf31d18b14f55b8f20b7a9d078ebd61023ca7e
|
refs/heads/master
| 2023-03-15T18:10:57.003077 | 2023-03-06T12:11:41 | 2023-03-06T12:11:41 | 256,570,425 | 0 | 0 |
BSD-3-Clause
| 2020-04-17T17:40:09 | 2020-04-17T17:40:08 | null |
UTF-8
|
Python
| false | false | 3,205 |
py
|
#
# Developer(s): Grigori Fursin
# Herve Guillou
#
import os
import sys
import imp
############################################################
from setuptools import find_packages, setup, convert_path
try:
from io import open
except ImportError:
pass
############################################################
# Version
version = imp.load_source(
'cbench.__init__', os.path.join('cbench', '__init__.py')).__version__
# Default portal
portal_url='https://cKnowledge.io'
############################################################
setup(
name='cbench',
author="Grigori Fursin",
author_email="[email protected]",
version=version,
description="A cross-platform client to perform collaborative and reproducible benchmarking, optimization and co-design of software and hardware for emerging workloads (AI, ML, quantum, IoT) via the open cKnowledge.io portal",
license="Apache Software License (Apache 2.0)",
long_description=open(convert_path('./README.md'), encoding="utf-8").read(),
long_description_content_type="text/markdown",
url=portal_url,
python_requires=">=2.7",
packages=find_packages(exclude=["tests*", "docs*"]),
package_data={"cbench":['static/*']},
include_package_data=True,
install_requires=[
'requests',
'click>=7.0',
'ck',
'virtualenv'
],
entry_points={
"console_scripts":
[
"cr = cbench.main:cli",
"cb = cbench.main:cli",
"cbench = cbench.main:cli"
]
},
zip_safe=False,
keywords="reproducible benchmarking, customizable benchmarking, portable workflows, reusable computational components, reproducibility, collaborative experiments, automation, optimization, co-design, collective knowledge",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Information Technology",
"Intended Audience :: Science/Research",
"Environment :: Console",
"Environment :: Plugins",
"Environment :: Web Environment",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering",
"Topic :: Software Development",
"Topic :: System",
"Topic :: System :: Benchmark",
"Topic :: Education",
"Topic :: Utilities"
],
)
###########################################################
# Get release notes
import cbench.comm_min
r=cbench.comm_min.send({'url':portal_url+'/api/v1/?',
'action':'event',
'dict':{'type':'get-cbench-release-notes','version':version}})
notes=r.get('notes','')
if notes!='':
print ('*********************************************************************')
print ('Release notes:')
print ('')
print (notes)
print ('*********************************************************************')
|
[
"[email protected]"
] | |
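Note that `imp` is deprecated and was removed in Python 3.12; a sketch of the same version lookup with `importlib` (not part of the original package):

import importlib.util
import os

spec = importlib.util.spec_from_file_location(
    'cbench.__init__', os.path.join('cbench', '__init__.py'))
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)  # executes cbench/__init__.py in isolation
version = module.__version__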
0d8b8bb1d3429f2aa1d4bab4fa0f23598807ad86
|
7985715183962847e4717da8be46ce9415bd4a3f
|
/tests/parse/parse_bbox_input.py
|
93ea3eff60dfad44a9f8853288306d6449478cf6
|
[] |
no_license
|
mgax/pywps-4
|
68bcd2e3398f2a312715e43135797c6906c1f7c9
|
03fd63d490d22b86dc1c14cb51eb6fb437812ca6
|
refs/heads/master
| 2020-04-10T13:30:09.145281 | 2013-06-03T20:22:23 | 2013-06-03T20:22:23 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,374 |
py
|
"""Test parsing of BoundingBoxInput
"""
import os
import sys
from io import StringIO
from lxml import objectify
pywpsPath = os.path.abspath(os.path.join(os.path.split(os.path.abspath(__file__))[0],"..",".."))
sys.path.insert(0,pywpsPath)
sys.path.append(pywpsPath)
import unittest
from pywps.request.execute.bbox import BoundingBoxInput
class ParseBBoxInputTestCase(unittest.TestCase):
def setUp(self):
self.inpt = BoundingBoxInput("bbox")
def test_parse_bbox_input_GET(self):
# testing basic parsing
request="bbox=1,2,3,4"
self.inpt.parse_url(request)
self.assertEquals(1,self.inpt.get_value().left)
self.assertEquals(2,self.inpt.get_value().dimensions)
# parse crs
request="bbox=1,2,3,4,epsg:4326"
self.inpt.parse_url(request)
self.assertEquals("EPSG:4326",self.inpt.get_crs(1).getcode())
def test_parse_bbox_input_POST(self):
"""Parse bounding box input XML"""
req_str = StringIO("""<wps:Input xmlns:wps="http://www.opengis.net/wps/1.0.0" xmlns:ows="http://www.opengis.net/ows/1.1">
<ows:Identifier>bbox</ows:Identifier>
<ows:Title>Bounding box title</ows:Title>
<ows:BoundingBox xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.opengis.net/ows/1.1 owsCommon.xsd"
crs="urn:ogc:crs:EPSG:6.3:26986" dimensions="2">
<!-- Example. Primary editor: Arliss Whiteside. Last updated 2005- 01-25 -->
<ows:LowerCorner>189000 834000</ows:LowerCorner>
<ows:UpperCorner>285000 962000</ows:UpperCorner>
</ows:BoundingBox>
</wps:Input>""")
request = objectify.parse(req_str)
self.inpt.parse_xml(request.getroot())
self.assertEquals(189000,self.inpt.get_value(2).left)
self.assertEquals(962000,self.inpt.get_value(2).top)
self.assertEquals(26986,self.inpt.get_crs(2).code)
self.assertEquals(2,self.inpt.get_dimensions(2))
pass
def test_parse_bbox_wgs84_POST(self):
"""Parse bounding box input XML as WGS84"""
req_str = StringIO("""<wps:Input xmlns:wps="http://www.opengis.net/wps/1.0.0" xmlns:ows="http://www.opengis.net/ows/1.1">
<ows:Identifier>bbox</ows:Identifier>
<ows:Title>Bounding box WGS84 title</ows:Title>
<ows:WGS84BoundingBox xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.opengis.net/ows/1.1 owsCommon.xsd">
<!-- Example. Primary editor: Arliss Whiteside. Last updated 2004/10/13. -->
<ows:LowerCorner>-71.63 41.75</ows:LowerCorner>
<ows:UpperCorner>-70.78 42.90</ows:UpperCorner>
</ows:WGS84BoundingBox>
</wps:Input>""")
request = objectify.parse(req_str)
self.inpt.parse_xml(request.getroot())
self.assertEquals(-71.63,self.inpt.get_value(3).left)
self.assertEquals(42.90,self.inpt.get_value(3).top)
self.assertEquals("EPSG:4326",self.inpt.get_value(3).get_crs().getcode())
pass
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(ParseBBoxInputTestCase)
unittest.TextTestRunner(verbosity=4).run(suite)
|
[
"[email protected]"
] | |
fa3703b31c4348a9ad084e7286d2ee6b4c101c05
|
76adadc595cf0e27f03833036ecb9e7e9387c7d5
|
/obstacle_avoidance_gazebo_and_tx2/Navigator_2D_gazebo/Pos2PosController.py
|
551b7e6c1ece177a9ce71037ff31a5155b4b3d49
|
[
"Apache-2.0"
] |
permissive
|
hddxds/scripts_from_gi
|
2fdef4dc747b6a269a1aa9df871afaca19bbe178
|
afb8977c001b860335f9062464e600d9115ea56e
|
refs/heads/master
| 2022-12-08T21:32:42.307594 | 2020-09-07T13:25:40 | 2020-09-07T13:25:40 | 293,529,833 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,772 |
py
|
#encoding=utf-8
def bangbang(error):  # bang-bang control on one axis; THRES and move() are assumed globals
    if error > THRES:
        move(-error)
    elif error < -THRES:
        move(error)
def pid():
    pass
class Pos2PosController:
    def __init__(self):
        pass
    def main_thread(self):
        self.init_mavros()
        self.main_loop()
    def init_mavros(self):
        # assumption: wait here until the FCU enters OFFBOARD mode
        while self.mavros_state != "OFFBOARD":
            sleep(0.1)
    def set_mavros_mode(self, mode):
        self.mavros_mode = mode
    def main_loop(self):
        while True:
            # PID controller or bang-bang controller
            get_target()  # continuous x, y, z, yaw
            get_current_pos()
            err = xxx  # placeholder left from the original sketch
            do_action()
            sleep(0.1)
def mav_move(self, position_x, position_y, position_z, relative_yaw=0):
self.set_status(status.GOING_TO_TARGET)
new_command = Command()
new_command.header.stamp = rospy.Time.now()
# use body frame
new_command.sub_mode = 0
# use command = MOVE according to AMO lab
new_command.command = 6
new_command.pos_sp[0] = position_x
new_command.pos_sp[1] = position_y
new_command.pos_sp[2] = position_z
new_command.vel_sp[0] = 0.0
new_command.vel_sp[1] = 0.0
new_command.yaw_sp = relative_yaw # TODO:fix this with 2step: 1:move;2.rotate(in absolute mode)
new_command.comid = self.cur_command_id
# self.task_id = new_command.comid
self.prev_command_id = self.cur_command_id
self.cur_command_id = self.cur_command_id + 1
self.mavros_control_pub.publish(new_command)
if self.reachTargetPosition(new_command):
return True
else:
return False
|
[
"[email protected]"
] | |
ca3978a5f8b151caf4f1debd0bc43cc60768672a
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03971/s931383808.py
|
524b86c116645356f3c2bf545938cf9268af51e8
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 312 |
py
|
n,a,b = map(int , input().split())
s = input()
rank = 0
overseas = 0
for num in range(n):
if s[num] == 'a' and rank < a+b:
print('Yes')
rank = rank+1
elif s[num] == 'b' and rank < a+b and overseas < b:
print('Yes')
rank = rank +1
overseas = overseas + 1
else:
print('No')
|
[
"[email protected]"
] | |
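A worked trace of the snippet above for n=5, a=1, b=1, s="abcab" (a+b=2 passing slots, at most b=1 of them overseas):

# 'a': rank 0 < 2                  -> Yes (rank becomes 1)
# 'b': rank 1 < 2, overseas 0 < 1  -> Yes (rank 2, overseas 1)
# 'c': neither branch              -> No
# 'a': rank 2 < 2 fails            -> No
# 'b': rank 2 < 2 fails            -> No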
c697e60d74d74ff44aacff3b5a9830ed04f0219a
|
1e9c9f2a9639db7cdb032aae69cb4d99aef1d3a5
|
/youTube/codebasics/numpyTutorial/2_basicArrayOperations.py
|
39a039c8f30e1b3b68b438c43594839bac54cbd0
|
[
"MIT"
] |
permissive
|
sagarnikam123/learnNPractice
|
f0da3f8acf653e56c591353ab342765a6831698c
|
1b3b0cb2cff2f478006626a4c37a99102acbb628
|
refs/heads/master
| 2023-02-04T11:21:18.211654 | 2023-01-24T14:47:52 | 2023-01-24T14:47:52 | 61,184,927 | 2 | 1 |
MIT
| 2022-03-06T11:07:18 | 2016-06-15T06:57:19 |
Python
|
UTF-8
|
Python
| false | false | 364 |
py
|
import numpy as np
a = np.array([5, 6, 9])
print('a: ', a)
print('a dimension: ', a.ndim)
print('access element: ', a[1])
print('itemsize: ', a.itemsize)
print('############### Multidimensional ###############')
t = np.array([ [1, 2], [3, 4], [5, 6]])
print('t: ', t)
print('t dimension: ', t.ndim)
print('access element: ', t[2])
print('itemsize: ', t.itemsize)
|
[
"[email protected]"
] | |
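For reference, the expected output of the multidimensional part above (itemsize assumes numpy's default 8-byte integer dtype; it is 4 on platforms that default to int32):

# t:  [[1 2]
#      [3 4]
#      [5 6]]
# t dimension:  2
# access element:  [5 6]   <- t[2] is the whole third row
# itemsize:  8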
855b7266b343bfa7d96dc95976e72742f81f2cd1
|
3c1ad0919924ed8d96ae5f9d9a10b97cfdf1ee38
|
/topic_categories.py
|
66576dd60242eef788d762326154e23626f8eccc
|
[] |
no_license
|
emonson/CopyrightScripts
|
4439ba584840e74ebdc5ab6083887e530757de64
|
862e5d2eb0af848647bf1cb2d95519071a00adc0
|
refs/heads/master
| 2020-05-18T15:37:15.926524 | 2017-03-16T14:51:08 | 2017-03-16T14:51:08 | 1,569,450 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,256 |
py
|
#!/usr/bin/env python
"""
Script to move articles (e.g. de, de la, d'...) from before
last name to after first name in Hilary's FileMaker Pro database
through the ODBC connection
Takes in an Excel workbook with three sheets:
one for the list of articles
one for the column and table names (Table, ID name, Last name, First name)
one for name exceptions that should not be changed
31 Jan 2012 -- E Monson
"""
from openpyxl import load_workbook
import numpy as N
import os
import sys
from pymongo import Connection
from pymongo.errors import ConnectionFailure
# Make a connection to MongoDB
try:
db_conn = Connection()
# db_conn = Connection("emo2.trinity.duke.edu", 27017)
except ConnectionFailure:
print "couldn't connect: be sure that Mongo is running on localhost:27017"
sys.exit(1)
db = db_conn['fashion_ip']
# note: leftover helper from an earlier name-cleaning script; unused below
def n_str(s, a):
"""Deals with None in first_name"""
if s is None:
return unicode(a.strip())
else:
return unicode(s.decode('utf8').strip() + ' ' + a.strip())
in_file = '/Users/emonson/Data/ArtMarkets/Katherine/mallet/nonstate_topic_keys_KDD_Edits.xlsx'
doc_topics_file = '/Users/emonson/Data/ArtMarkets/Katherine/mallet/nonstate_copy_200_doc_topics.txt'
# Load in Excel sheet with topic keys
wb = load_workbook(in_file)
sheet = wb.get_sheet_by_name("nonstate_copy_200_topic_keys.tx")
row_tuples = [tuple(xx.value for xx in yy) for yy in sheet.rows]
ntopics = len(sheet.rows)
subject_names = []
subject_vectors = []
for tt in row_tuples:
subs = tt[0] # subject string
top = tt[1] # topic index
if subs is not None:
# compound subjects separated by commas
subs_list = [xx.strip() for xx in subs.split(',')]
for sub in subs_list:
if sub not in subject_names:
subject_names.append(sub)
subject_vectors.append(N.zeros(ntopics))
idx = subject_names.index(sub)
subject_vectors[idx][top] = 1
# Read in document topics and calculate subject mixtures
file_ids = []
file_subjects = []
for jj, line in enumerate(open(doc_topics_file)):
# Header line
if jj == 0:
continue
ll = line.rstrip().split(' ')
# Get rid of document index
del ll[0]
# Grab the file ID
file_ids.append(os.path.splitext(os.path.basename(ll[0]))[0])
del ll[0]
# Generate the ordered array of topic weight values
# (initially ordered by weight rather than by topic)
weights = N.zeros(ntopics)
for ii in range(0,len(ll),2):
weights[int(ll[ii])] = float(ll[ii+1])
# Do a dot product to find the subject overlap
subject_weights = []
for ss in subject_vectors:
subject_weights.append(N.dot(ss,weights))
file_subjects.append(subject_weights)
print "Done computing subject vectors"
# Probably should have output MongoDB docs with _id as name of file
# to make sure it's really unique, but I think the Google Scholar file name
# is also a unique identifier.
# Clear out all subjects first so we don't get leftovers from another analysis
print "Clearing out old subjects"
db.docs.update({},{'$unset':{'subjects':1}})
# Add in new subject weights as name:weight pairs
print "Updating new subjects"
for name, vector in zip(file_ids, file_subjects):
sub_dict = dict(zip(subject_names, vector))
db.docs.update({'filename':name+'.html'},{'$set':{'subjects':sub_dict}}, upsert=False, multi=False)
|
[
"[email protected]"
] | |
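A toy check of the subject-mixture logic above: a binary subject vector marking topics {0, 2} picks up exactly the summed weight of those topics in a document:

import numpy as N

subject_vector = N.array([1.0, 0.0, 1.0, 0.0])   # subject covers topics 0 and 2
doc_weights = N.array([0.5, 0.2, 0.25, 0.05])    # document-topic mixture
print(N.dot(subject_vector, doc_weights))        # 0.75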
e3696afcd45abc84967cac4f0b35f6872f307c18
|
1033674d794fd0e09f16486d9f3cbb17a326d764
|
/_unittests/ut_df/test_streaming_dataframe.py
|
0e9cba3cbfa0854e73b4b0324757786e3b14596c
|
[
"MIT"
] |
permissive
|
chanpatrick/pandas_streaming
|
a074e576abfd7f67ecf524a8e71c325684d1ef17
|
f24acafcb6faedfa53982a1700d133413e7cce5f
|
refs/heads/master
| 2020-06-08T06:34:20.923108 | 2019-06-10T21:03:42 | 2019-06-10T21:03:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 20,509 |
py
|
# -*- coding: utf-8 -*-
"""
@brief test log(time=4s)
"""
import os
import unittest
import pandas
import numpy
from pyquickhelper.pycode import ExtTestCase, get_temp_folder
from pandas_streaming.data import dummy_streaming_dataframe
from pandas_streaming.exc import StreamingInefficientException
from pandas_streaming.df import StreamingDataFrame
from pandas_streaming.df.dataframe import StreamingDataFrameSchemaError
class TestStreamingDataFrame(ExtTestCase):
def test_shape(self):
sdf = dummy_streaming_dataframe(100)
dfs = [df for df in sdf]
self.assertEqual(len(dfs), 10)
self.assertEqual(len(dfs), 10)
shape = sdf.shape
self.assertEqual(shape, (100, 2))
self.assertRaise(lambda: sdf.sort_values(
"r"), StreamingInefficientException)
def test_init(self):
sdf = dummy_streaming_dataframe(100)
df1 = sdf.to_df()
sdf2 = StreamingDataFrame(sdf)
df2 = sdf2.to_df()
self.assertEqualDataFrame(df1, df2)
def test_to_csv(self):
sdf = dummy_streaming_dataframe(100)
st = sdf.to_csv()
self.assertStartsWith(",cint,cstr\n0,0,s0",
st.replace('\r', ''))
st = sdf.to_csv()
self.assertStartsWith(",cint,cstr\n0,0,s0",
st.replace('\r', ''))
def test_iterrows(self):
sdf = dummy_streaming_dataframe(100)
rows = list(sdf.iterrows())
self.assertEqual(sdf.shape[0], len(rows))
rows = list(sdf.iterrows())
self.assertEqual(sdf.shape[0], len(rows))
def test_head(self):
sdf = dummy_streaming_dataframe(100)
st = sdf.head()
self.assertEqual(st.shape, (5, 2))
st = sdf.head(n=20)
self.assertEqual(st.shape, (20, 2))
st = sdf.head(n=20)
self.assertEqual(st.shape, (20, 2))
def test_tail(self):
sdf = dummy_streaming_dataframe(100)
st = sdf.tail()
self.assertEqual(st.shape, (5, 2))
st = sdf.tail(n=20)
self.assertEqual(st.shape, (10, 2))
def test_read_csv(self):
temp = get_temp_folder(__file__, "temp_read_csv")
df = pandas.DataFrame(data=dict(a=[5, 6], b=["er", "r"]))
name = os.path.join(temp, "df.csv")
name2 = os.path.join(temp, "df2.csv")
name3 = os.path.join(temp, "df3.csv")
df.to_csv(name, index=False)
df.to_csv(name2, index=True)
sdf = StreamingDataFrame.read_csv(name)
text = sdf.to_csv(index=False)
sdf2 = StreamingDataFrame.read_csv(name2, index_col=0)
text2 = sdf2.to_csv(index=True)
sdf2.to_csv(name3, index=True)
with open(name, "r") as f:
exp = f.read()
with open(name2, "r") as f:
exp2 = f.read()
with open(name3, "r") as f:
text3 = f.read()
self.assertEqual(text.replace('\r', ''), exp)
sdf2 = StreamingDataFrame.read_df(df)
self.assertEqualDataFrame(sdf.to_dataframe(), sdf2.to_dataframe())
self.assertEqual(text2.replace('\r', ''), exp2)
self.assertEqual(text3.replace('\r', '').replace('\n\n', '\n'),
exp2.replace('\r', ''))
def test_where(self):
sdf = dummy_streaming_dataframe(100)
cols = sdf.columns
self.assertEqual(list(cols), ['cint', 'cstr'])
dts = sdf.dtypes
self.assertEqual(len(dts), 2)
res = sdf.where(lambda row: row["cint"] == 1)
st = res.to_csv()
self.assertStartsWith(",cint,cstr\n0,,\n1,1.0,s1",
st.replace('\r', ''))
res = sdf.where(lambda row: row["cint"] == 1)
st = res.to_csv()
self.assertStartsWith(",cint,cstr\n0,,\n1,1.0,s1",
st.replace('\r', ''))
def test_dataframe(self):
sdf = dummy_streaming_dataframe(100)
df = sdf.to_dataframe()
self.assertEqual(df.shape, (100, 2))
def test_sample(self):
sdf = dummy_streaming_dataframe(100)
res = sdf.sample(frac=0.1)
self.assertLesser(res.shape[0], 30)
self.assertRaise(lambda: sdf.sample(n=5), ValueError)
res = sdf.sample(frac=0.1)
self.assertLesser(res.shape[0], 30)
self.assertRaise(lambda: sdf.sample(n=5), ValueError)
def test_sample_cache(self):
sdf = dummy_streaming_dataframe(100)
res = sdf.sample(frac=0.1, cache=True)
df1 = res.to_df()
df2 = res.to_df()
self.assertEqualDataFrame(df1, df2)
self.assertTrue(res.is_stable(n=df1.shape[0], do_check=True))
self.assertTrue(res.is_stable(n=df1.shape[0], do_check=False))
res = sdf.sample(frac=0.1, cache=False)
self.assertFalse(res.is_stable(n=df1.shape[0], do_check=False))
def test_sample_reservoir_cache(self):
sdf = dummy_streaming_dataframe(100)
res = sdf.sample(n=10, cache=True, reservoir=True)
df1 = res.to_df()
df2 = res.to_df()
self.assertEqualDataFrame(df1, df2)
self.assertEqual(df1.shape, (10, res.shape[1]))
self.assertRaise(lambda: sdf.sample(n=10, cache=False, reservoir=True),
ValueError)
self.assertRaise(lambda: sdf.sample(frac=0.1, cache=True, reservoir=True),
ValueError)
def test_apply(self):
sdf = dummy_streaming_dataframe(100)
self.assertNotEmpty(list(sdf))
sdf = sdf.applymap(str)
self.assertNotEmpty(list(sdf))
sdf = sdf.apply(lambda row: row[["cint"]] + "r", axis=1)
self.assertNotEmpty(list(sdf))
text = sdf.to_csv(header=False)
self.assertStartsWith("0,0r\n1,1r\n2,2r\n3,3r",
text.replace('\r', ''))
def test_train_test_split(self):
sdf = dummy_streaming_dataframe(100)
tr, te = sdf.train_test_split(index=False, streaming=False)
trsdf = StreamingDataFrame.read_str(tr)
tesdf = StreamingDataFrame.read_str(te)
trdf = trsdf.to_dataframe()
tedf = tesdf.to_dataframe()
df_exp = sdf.to_dataframe()
df_val = pandas.concat([trdf, tedf])
self.assertEqual(df_exp.shape, df_val.shape)
df_val = df_val.sort_values("cint").reset_index(drop=True)
self.assertEqualDataFrame(df_val, df_exp)
def test_train_test_split_streaming(self):
sdf = dummy_streaming_dataframe(100, asfloat=True)
trsdf, tesdf = sdf.train_test_split(
streaming=True, unique_rows=True, partitions=[0.7, 0.3])
trdf = trsdf.to_dataframe()
tedf = tesdf.to_dataframe()
df_exp = sdf.to_dataframe()
df_val = pandas.concat([trdf, tedf])
self.assertEqual(df_exp.shape, df_val.shape)
df_val = df_val.sort_values("cfloat").reset_index(drop=True)
self.assertEqualDataFrame(df_val, df_exp)
trdf2 = trsdf.to_dataframe()
tedf2 = tesdf.to_dataframe()
df_val = pandas.concat([trdf2, tedf2])
self.assertEqual(df_exp.shape, df_val.shape)
df_val = df_val.sort_values("cfloat").reset_index(drop=True)
self.assertEqualDataFrame(df_val, df_exp)
self.assertEqual(trdf.shape, trdf2.shape)
self.assertEqual(tedf.shape, tedf2.shape)
self.assertGreater(trdf.shape[0], tedf.shape[0])
self.assertGreater(trdf2.shape[0], tedf2.shape[0])
def test_train_test_split_streaming_tiny(self):
df = pandas.DataFrame(data=dict(X=[4.5, 6, 7], Y=["a", "b", "c"]))
sdf2 = StreamingDataFrame.read_df(pandas.concat([df, df]))
sdftr, sdfte = sdf2.train_test_split(test_size=0.5)
df1 = sdfte.head()
df2 = sdfte.head()
if df1 is not None or df2 is not None:
self.assertEqualDataFrame(df1, df2)
df1 = sdftr.head()
df2 = sdftr.head()
if df1 is not None or df2 is not None:
self.assertEqualDataFrame(df1, df2)
sdf = StreamingDataFrame.read_df(df)
sdf2 = sdf.concat(sdf, axis=0)
sdftr, sdfte = sdf2.train_test_split(test_size=0.5)
df1 = sdfte.head()
df2 = sdfte.head()
if df1 is not None or df2 is not None:
self.assertEqualDataFrame(df1, df2)
df1 = sdftr.head()
df2 = sdftr.head()
if df1 is not None or df2 is not None:
self.assertEqualDataFrame(df1, df2)
def test_train_test_split_streaming_strat(self):
sdf = dummy_streaming_dataframe(100, asfloat=True,
tify=["t1" if i % 3 else "t0" for i in range(0, 100)])
trsdf, tesdf = sdf.train_test_split(
streaming=True, unique_rows=True, stratify="tify")
trdf = trsdf.to_dataframe()
tedf = tesdf.to_dataframe()
df_exp = sdf.to_dataframe()
df_val = pandas.concat([trdf, tedf])
self.assertEqual(df_exp.shape, df_val.shape)
df_val = df_val.sort_values("cfloat").reset_index(drop=True)
self.assertEqualDataFrame(df_val, df_exp)
trdf = trsdf.to_dataframe()
tedf = tesdf.to_dataframe()
df_val = pandas.concat([trdf, tedf])
self.assertEqual(df_exp.shape, df_val.shape)
df_val = df_val.sort_values("cfloat").reset_index(drop=True)
self.assertEqualDataFrame(df_val, df_exp)
trgr = trdf.groupby("tify").count()
trgr["part"] = 0
tegr = tedf.groupby("tify").count()
tegr["part"] = 1
gr = pandas.concat([trgr, tegr])
self.assertGreater(gr['cfloat'].min(), 4)
def test_train_test_split_file(self):
temp = get_temp_folder(__file__, "temp_train_test_split_file")
names = [os.path.join(temp, "train.txt"),
os.path.join(temp, "test.txt")]
sdf = dummy_streaming_dataframe(100)
sdf.train_test_split(names, index=False, streaming=False)
trsdf = StreamingDataFrame.read_csv(names[0])
tesdf = StreamingDataFrame.read_csv(names[1])
self.assertGreater(trsdf.shape[0], 20)
self.assertGreater(tesdf.shape[0], 20)
trdf = trsdf.to_dataframe()
tedf = tesdf.to_dataframe()
self.assertGreater(trdf.shape[0], 20)
self.assertGreater(tedf.shape[0], 20)
df_exp = sdf.to_dataframe()
df_val = pandas.concat([trdf, tedf])
self.assertEqual(df_exp.shape, df_val.shape)
df_val = df_val.sort_values("cint").reset_index(drop=True)
self.assertEqualDataFrame(df_val, df_exp)
def test_train_test_split_file_pattern(self):
temp = get_temp_folder(__file__, "temp_train_test_split_file_pattern")
sdf = dummy_streaming_dataframe(100)
names = os.path.join(temp, "spl_{0}.txt")
self.assertRaise(lambda: sdf.train_test_split(
names, index=False, streaming=False), ValueError)
names = os.path.join(temp, "spl_{}.txt")
tr, te = sdf.train_test_split(names, index=False, streaming=False)
trsdf = StreamingDataFrame.read_csv(tr)
tesdf = StreamingDataFrame.read_csv(te)
trdf = trsdf.to_dataframe()
tedf = tesdf.to_dataframe()
df_exp = sdf.to_dataframe()
df_val = pandas.concat([trdf, tedf])
self.assertEqual(df_exp.shape, df_val.shape)
df_val = df_val.sort_values("cint").reset_index(drop=True)
self.assertEqualDataFrame(df_val, df_exp)
def test_merge(self):
def compares(a, b, how):
m = a.merge(b, on="cint", indicator=True)
dm = m.to_dataframe()
da = a.to_dataframe()
db = b.to_dataframe()
exp = da.merge(db, on="cint", indicator=True)
self.assertEqualDataFrame(dm.reset_index(drop=True),
exp.reset_index(drop=True))
sdf20 = dummy_streaming_dataframe(20)
sdf30 = dummy_streaming_dataframe(30)
# itself
hows = "inner left right outer".split()
for how in hows:
compares(sdf20, sdf20, how)
compares(sdf20, sdf20, how)
for how in hows:
compares(sdf20, sdf30, how)
compares(sdf20, sdf30, how)
for how in hows:
compares(sdf30, sdf20, how)
compares(sdf30, sdf20, how)
sdf20.merge(sdf20.to_dataframe(), on="cint", indicator=True)
def test_concatv(self):
sdf20 = dummy_streaming_dataframe(20)
sdf30 = dummy_streaming_dataframe(30)
df20 = sdf20.to_dataframe()
df30 = sdf30.to_dataframe()
df = pandas.concat([df20, df30], axis=0)
m1 = sdf20.concat(sdf30, axis=0)
self.assertEqualDataFrame(m1.to_dataframe(), df)
m1 = sdf20.concat(df30, axis=0)
self.assertEqualDataFrame(m1.to_dataframe(), df)
m1 = sdf20.concat(map(lambda x: x, [df30]), axis=0)
self.assertEqualDataFrame(m1.to_dataframe(), df)
m1 = sdf20.concat(map(lambda x: x, [df30]), axis=0)
self.assertEqualDataFrame(m1.to_dataframe(), df)
df30["g"] = 4
self.assertRaise(lambda: sdf20.concat(df30).to_dataframe(),
ValueError, "Frame others[0] do not have the same column names")
df20["cint"] = df20["cint"].astype(float)
self.assertRaise(lambda: sdf20.concat(df20).to_dataframe(),
ValueError, "Frame others[0] do not have the same column types")
def test_concath(self):
sdf20 = dummy_streaming_dataframe(20)
sdf30 = dummy_streaming_dataframe(20)
df20 = sdf20.to_dataframe()
df30 = sdf30.to_dataframe()
df = pandas.concat([df20, df30], axis=1)
m1 = sdf20.concat(sdf30, axis=1)
self.assertEqualDataFrame(m1.to_dataframe(), df)
sdf22 = dummy_streaming_dataframe(22)
sdf25 = dummy_streaming_dataframe(25)
self.assertRaise(lambda: sdf22.concat(sdf25, axis=1).to_dataframe(),
RuntimeError)
def test_groupby(self):
df20 = dummy_streaming_dataframe(20).to_dataframe()
df20["key"] = df20["cint"].apply(lambda i: i % 3 == 0)
sdf20 = StreamingDataFrame.read_df(df20, chunksize=5)
gr = sdf20.groupby("key", lambda gr: gr.sum())
gr2 = df20.groupby("key").sum()
self.assertEqualDataFrame(gr, gr2)
self.assertRaise(lambda: sdf20.groupby(
"key", in_memory=False), NotImplementedError)
# Do not replace lambda c:sum(c) by sum or...
# pandas.core.base.SpecificationError: Function names must be unique, found multiple named sum
gr2 = df20.groupby("key").agg([numpy.sum, lambda c:sum(c)])
gr = sdf20.groupby("key", lambda gr: gr.agg(
[numpy.sum, lambda c:sum(c)]))
self.assertEqualDataFrame(gr, gr2)
gr = sdf20.groupby("key", lambda gr: gr.count())
gr2 = df20.groupby("key").count()
self.assertEqualDataFrame(gr, gr2)
df = pandas.DataFrame(dict(A=[3, 4, 3], B=[5, 6, 7]))
sdf = StreamingDataFrame.read_df(df)
gr = sdf.groupby("A")
gr2 = df.groupby("A").sum()
self.assertEqualDataFrame(gr, gr2)
def test_groupby_cum(self):
df20 = dummy_streaming_dataframe(20).to_dataframe()
df20["key"] = df20["cint"].apply(lambda i: i % 3 == 0)
sdf20 = StreamingDataFrame.read_df(df20, chunksize=5)
sgr = sdf20.groupby_streaming(
"key", lambda gr: gr.sum(), strategy='cum', as_index=False)
gr2 = df20.groupby("key", as_index=False).sum()
lastgr = None
for gr in sgr:
self.assertEqual(list(gr.columns), list(gr2.columns))
lastgr = gr
self.assertEqualDataFrame(lastgr, gr2)
def test_groupby_streaming(self):
df20 = dummy_streaming_dataframe(20).to_dataframe()
df20["key"] = df20["cint"].apply(lambda i: i % 3 == 0)
sdf20 = StreamingDataFrame.read_df(df20, chunksize=5)
sgr = sdf20.groupby_streaming(
"key", lambda gr: gr.sum(), strategy='streaming', as_index=False)
gr2 = df20.groupby("key", as_index=False).sum()
grs = [gr for gr in sgr]
gr = pandas.concat(grs).groupby("key", as_index=False).sum()
self.assertEqualDataFrame(gr, gr2)
def test_groupby_cum_asindex(self):
df20 = dummy_streaming_dataframe(20).to_dataframe()
df20["key"] = df20["cint"].apply(lambda i: i % 3 == 0)
sdf20 = StreamingDataFrame.read_df(df20, chunksize=5)
sgr = sdf20.groupby_streaming(
"key", lambda gr: gr.sum(), strategy='cum', as_index=True)
gr2 = df20.groupby("key", as_index=True).sum()
lastgr = None
for gr in sgr:
self.assertEqual(list(gr.columns), list(gr2.columns))
lastgr = gr
self.assertEqualDataFrame(lastgr, gr2)
def test_merge_2(self):
df = pandas.DataFrame(data=dict(X=[4.5, 6, 7], Y=["a", "b", "c"]))
df2 = pandas.concat([df, df])
sdf = StreamingDataFrame.read_df(df)
sdf2 = sdf.concat(sdf, axis=0)
self.assertEqualDataFrame(df2, sdf2.to_dataframe())
self.assertEqualDataFrame(df2, sdf2.to_dataframe())
m = pandas.DataFrame(dict(Y=["a", "b"], Z=[10, 20]))
jm = df2.merge(m, left_on="Y", right_on="Y", how="outer")
sjm = sdf2.merge(m, left_on="Y", right_on="Y", how="outer")
self.assertEqualDataFrame(jm.sort_values(["X", "Y"]).reset_index(drop=True),
sjm.to_dataframe().sort_values(["X", "Y"]).reset_index(drop=True))
def test_schema_consistent(self):
df = pandas.DataFrame([dict(cf=0, cint=0, cstr="0"), dict(cf=1, cint=1, cstr="1"),
dict(cf=2, cint="s2", cstr="2"), dict(cf=3, cint=3, cstr="3")])
temp = get_temp_folder(__file__, "temp_schema_consistent")
name = os.path.join(temp, "df.csv")
df.to_csv(name, index=False)
self.assertEqual(df.shape, (4, 3))
sdf = StreamingDataFrame.read_csv(name, chunksize=2)
self.assertRaise(lambda: list(sdf), StreamingDataFrameSchemaError)
sdf = StreamingDataFrame.read_csv(
name, chunksize=2, check_schema=False)
pieces = list(sdf)
self.assertEqual(len(pieces), 2)
def test_getitem(self):
sdf = dummy_streaming_dataframe(100)
sdf2 = sdf[["cint"]]
self.assertEqual(sdf2.shape, (100, 1))
df1 = sdf.to_df()
df2 = sdf2.to_df()
self.assertEqualDataFrame(df1[["cint"]], df2)
self.assertRaise(lambda: sdf["cint"], NotImplementedError)
self.assertRaise(lambda: sdf[:, "cint"], NotImplementedError)
def test_read_csv_names(self):
this = os.path.abspath(os.path.dirname(__file__))
data = os.path.join(this, "data", "buggy_hash2.csv")
df = pandas.read_csv(data, sep="\t", names=[
"A", "B", "C"], header=None)
sdf = StreamingDataFrame.read_csv(
data, sep="\t", names=["A", "B", "C"], chunksize=2, header=None)
head = sdf.head(n=1)
self.assertEqualDataFrame(df.head(n=1), head)
def test_add_column(self):
df = pandas.DataFrame(data=dict(X=[4.5, 6, 7], Y=["a", "b", "c"]))
sdf = StreamingDataFrame.read_df(df)
sdf2 = sdf.add_column("d", lambda row: int(1))
df2 = sdf2.to_dataframe()
df["d"] = 1
self.assertEqualDataFrame(df, df2)
sdf3 = StreamingDataFrame.read_df(df)
sdf4 = sdf3.add_column("dd", 2)
df4 = sdf4.to_dataframe()
df["dd"] = 2
self.assertEqualDataFrame(df, df4)
sdfA = StreamingDataFrame.read_df(df)
sdfB = sdfA.add_column("dd12", lambda row: row["dd"] + 10)
dfB = sdfB.to_dataframe()
df["dd12"] = 12
self.assertEqualDataFrame(df, dfB)
def test_fillna(self):
df = pandas.DataFrame(
data=dict(X=[4.5, numpy.nan, 7], Y=["a", "b", numpy.nan]))
sdf = StreamingDataFrame.read_df(df)
df2 = pandas.DataFrame(
data=dict(X=[4.5, 10.0, 7], Y=["a", "b", "NAN"]))
na = sdf.fillna(value=dict(X=10.0, Y="NAN"))
ndf = na.to_df()
self.assertEqual(ndf, df2)
df3 = pandas.DataFrame(
data=dict(X=[4.5, 10.0, 7], Y=["a", "b", numpy.nan]))
na = sdf.fillna(value=dict(X=10.0))
ndf = na.to_df()
self.assertEqual(ndf, df3)
if __name__ == "__main__":
TestStreamingDataFrame().test_apply()
unittest.main()
|
[
"[email protected]"
] | |
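A minimal usage sketch of the API exercised by these tests, using only calls that appear above:

import pandas
from pandas_streaming.df import StreamingDataFrame

df = pandas.DataFrame(dict(X=[1, 2, 3, 4], Y=list("abcd")))
sdf = StreamingDataFrame.read_df(df, chunksize=2)  # stream the frame in chunks of 2 rows
print(sdf.shape)            # (4, 2), computed by walking the chunks
print(sdf.to_dataframe())   # materialise the stream back into a single DataFrame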
4a7a97e46437fe38cad331c51922a344587cc1e3
|
3432efd194137e1d0cb05656eb547c9992229f02
|
/django/pytest/test5/test5/settings.py
|
19b1d23ae1de415200eae15591fceee0dd6403bd
|
[] |
no_license
|
zhanganxia/other_code
|
31747d7689ae1e91fcf3f9f758df130246e7d495
|
8d09d9d0b6d6a1a9b8755487f926ac6fafd761fa
|
refs/heads/master
| 2021-09-04T02:22:38.632685 | 2018-01-14T15:37:14 | 2018-01-14T15:37:14 | 107,007,482 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,946 |
py
|
"""
Django settings for test5 project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vp2*ll9@-#nv1q)1$lor5g+6xol4v2ql22&rq&lkgng&x1musf'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'booktest',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
# 'booktest.middleware.my_mid',
)
ROOT_URLCONF = 'test5.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'test5.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'test2',
'USER': 'test',
'PASSWORD': 'mysql',
'HOST': 'localhost',
'PORT': '3306',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'zh-Hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
# STATIC_URL = '/abc/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR,'static'),
]
MEDIA_ROOT = os.path.join(BASE_DIR,"static/media")
|
[
"[email protected]"
] | |
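The `django.db.backends.mysql` engine above needs a MySQL driver; on Python 3 a common companion for Django 1.8-era projects is the PyMySQL shim, usually placed in the project package's `__init__.py` (an assumption, that file is not shown here):

import pymysql

pymysql.install_as_MySQLdb()  # lets Django's MySQL backend import PyMySQL as MySQLdb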
942e8bf47622d8a6e758e7280fef2995844ceadc
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02775/s986607651.py
|
c3546065a4b749bfa2bed51b4f2688ba876de31b
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 219 |
py
|
s=input()
dp0=[0]*(len(s)+1)
dp1=[0]*(len(s)+1)
dp1[0]=1
for i in range(1,len(s)+1):
n=int(s[i-1:i])
dp0[i]=min(dp0[i-1]+n,dp1[i-1]+10-n)
dp1[i]=min(dp0[i-1]+(1 if n+1==10 else n+1),dp1[i-1]+10-n-1)
print(dp0[-1])
|
[
"[email protected]"
] | |
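A worked trace of the carry DP above for s = "36", where dp0[i] is the fewest banknotes after handling the first i digits with no carry pending and dp1[i] with a carry of 1 pending:

# i=1, digit 3: dp0 = min(0+3, 1+(10-3)) = 3     dp1 = min(0+(3+1), 1+(10-3-1)) = 4
# i=2, digit 6: dp0 = min(3+6, 4+(10-6)) = 8     dp1 = min(3+(6+1), 4+(10-6-1)) = 7
# answer dp0[-1] = 8: pay four 10s and take four 1s back as change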
3ec0809e0b6b7fba80e5361b491f4b5848f0fffb
|
4012f290d83ae7f4c09d7440f26d2acd7e63efbe
|
/2705.py
|
cfa9d3e09805b3d8318fdf0725d78d909b7a3f91
|
[] |
no_license
|
jinaur/codeup
|
ffc2d0fdf73892c1f46d80021ad8f4c1293c9e2e
|
5f75ace909e2b3151171932cc3ee9f3c49dd46d9
|
refs/heads/master
| 2023-04-15T07:42:06.244806 | 2021-04-25T13:59:42 | 2021-04-25T13:59:42 | 277,760,813 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 437 |
py
|
n = int(input())
l, t = list(map(int, input().split()))
a = list(map(int, input().split()))
r = 0
count = 0
i_count = 0
for i in range(0, n) :
if a[i] < i_count + count :
continue
else :
count = 0
for j in range(1, l+1) :
if i >= n-j :
break
if a[i] == a[i+j] :
i_count = a[i]
count += t
break
if count == 0 :
r += 10000
print(r)
|
[
"[email protected]"
] | |
6286d7f5c5b65c4e326259f61aa62cddd8a69af4
|
c39f999cae8825afe2cdf1518d93ba31bd4c0e95
|
/PYME/DSView/modules/psfTools.py
|
2f63492b1f9c133f8585b3a6ecc0ab1ce6d6ccc0
|
[] |
no_license
|
WilliamRo/CLipPYME
|
0b69860136a9b2533f2f29fc29408d7471cb934d
|
6596167034c727ad7dad0a741dd59e0e48f6852a
|
refs/heads/master
| 2023-05-11T09:50:58.605989 | 2023-05-09T02:17:47 | 2023-05-09T02:17:47 | 60,789,741 | 3 | 1 | null | 2016-06-17T08:52:44 | 2016-06-09T16:30:14 |
Python
|
UTF-8
|
Python
| false | false | 13,939 |
py
|
#!/usr/bin/python
##################
# coloc.py
#
# Copyright David Baddeley, 2011
# [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################
#import numpy
import wx
import wx.grid
#import pylab
#from PYME.DSView.image import ImageStack
try:
from enthought.traits.api import HasTraits, Float, Int
from enthought.traits.ui.api import View, Item
from enthought.traits.ui.menu import OKButton
except ImportError:
from traits.api import HasTraits, Float, Int
from traitsui.api import View, Item
from traitsui.menu import OKButton
from graphViewPanel import *
from PYME.PSFEst import psfQuality
def remove_newlines(s):
s = '<>'.join(s.split('\n\n'))
s = ' '.join(s.split())
return '\n'.join(s.split('<>'))
class PSFQualityPanel(wx.Panel):
def __init__(self, dsviewer):
wx.Panel.__init__(self, dsviewer)
self.image = dsviewer.image
self.dsviewer = dsviewer
vsizer = wx.BoxSizer(wx.VERTICAL)
self.grid = wx.grid.Grid(self, -1)
self.grid.CreateGrid(len(psfQuality.test_names), 2)
self.grid.EnableEditing(0)
self.grid.SetColLabelValue(0, "Localisation")
self.grid.SetColLabelValue(1, "Deconvolution")
for i, testName in enumerate(psfQuality.test_names):
self.grid.SetRowLabelValue(i, testName)
self.FillGrid()
self.grid.AutoSizeColumns()
self.grid.SetRowLabelSize(wx.grid.GRID_AUTOSIZE)
vsizer.Add(self.grid, 2, wx.EXPAND|wx.ALL, 5)
vsizer.Add(wx.StaticText(self, -1, 'Click a cell for description'), 0, wx.ALL, 5)
self.description = wx.TextCtrl(self, -1, '', style = wx.TE_MULTILINE|wx.TE_AUTO_SCROLL|wx.TE_READONLY)
vsizer.Add(self.description, 1, wx.EXPAND|wx.ALL, 5)
self.grid.Bind(wx.grid.EVT_GRID_CELL_LEFT_CLICK, self.OnSelectCell)
self.SetSizerAndFit(vsizer)
def OnSelectCell(self, event):
r = event.GetRow()
c = event.GetCol()
self.description.SetValue('')
name = psfQuality.test_names[r]
if c == 0:
# localisations
try:
self.description.SetValue(remove_newlines(psfQuality.localisation_tests[name].__doc__))
except KeyError:
pass
elif c == 1:
#deconvolution
try:
self.description.SetValue(remove_newlines(psfQuality.deconvolution_tests[name].__doc__))
except KeyError:
pass
event.Skip()
def FillGrid(self, caller=None):
loc_res, dec_res = psfQuality.runTests(self.image, self.dsviewer.crbv)
for i, testName in enumerate(psfQuality.test_names):
try:
val, merit = loc_res[testName]
colour = psfQuality.colour(merit)
self.grid.SetCellValue(i, 0, '%3.3g' % val)
self.grid.SetCellBackgroundColour(i, 0, tuple(colour*255))
except KeyError:
pass
try:
val, merit = dec_res[testName]
colour = psfQuality.colour(merit)
self.grid.SetCellValue(i, 1, '%3.3g' % val)
self.grid.SetCellBackgroundColour(i, 1, tuple(colour*255))
except KeyError:
pass
class CRBViewPanel(wx.Panel):
def __init__(self, parent, image, background=1):
wx.Panel.__init__(self, parent)
self.image = image
self.background = background
sizer1 = wx.BoxSizer(wx.VERTICAL)
self.figure = Figure()
self.axes = self.figure.add_subplot(111)
self.canvas = FigureCanvas(self, -1, self.figure)
sizer1.Add(self.canvas, 1, wx.TOP | wx.LEFT | wx.EXPAND)
hsizer = wx.BoxSizer(wx.HORIZONTAL)
hsizer.Add(wx.StaticText(self, -1, 'Background photons:', pos = (0,0)), 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
self.tBackground = wx.TextCtrl(self, -1, '%d' % self.background, pos=(0, 0))
hsizer.Add(self.tBackground, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
self.tBackground.Bind(wx.EVT_TEXT, self.OnChangeBackground)
sizer1.Add(hsizer)
self.Bind(wx.EVT_SIZE, self._onSize)
#self.toolbar.update()
self.SetSizerAndFit(sizer1)
self.calcCRB()
#self.draw()
def OnChangeBackground(self, event):
print('b')
self.background = float(self.tBackground.GetValue())
self.calcCRB()
def calcCRB(self, caller=None):
from PYME.Analysis import cramerRao
from PYME.PSFGen import fourierHNA
#print 'b'
import numpy as np
d = self.image.data[:,:,:]
I = d[:,:,d.shape[2]/2].sum()
vs = 1e3*np.array([self.image.mdh['voxelsize.x'], self.image.mdh['voxelsize.y'],self.image.mdh['voxelsize.z']])
#print 'fi'
FI = cramerRao.CalcFisherInformZn2(d*(2e3/I) + self.background, 100, voxelsize=vs)
#print 'crb'
self.crb = cramerRao.CalcCramerReoZ(FI)
#print 'crbd'
z_ = np.arange(d.shape[2])*self.image.mdh['voxelsize.z']*1.0e3
self.z_ = z_ - z_.mean()
ps_as = fourierHNA.GenAstigPSF(self.z_, vs[0], 2)
I = ps_as[:,:,ps_as.shape[2]/2].sum()
self.crb_as = (cramerRao.CalcCramerReoZ(cramerRao.CalcFisherInformZn2(ps_as*2000/I + self.background, 500, voxelsize=vs)))
self.draw()
def draw(self, event=None):
self.axes.cla()
self.axes.plot(self.z_, np.sqrt(self.crb[:,0]), label='x')
self.axes.plot(self.z_, np.sqrt(self.crb[:,1]), label='y')
self.axes.plot(self.z_, np.sqrt(self.crb[:,2]), label='z')
self.axes.legend()
self.axes.set_xlabel('Defocus [nm]')
self.axes.set_ylabel('Std. Dev. [nm]')
self.axes.set_title('Cramer-Rao bound for 2000 photons')
crb_as = np.sqrt(self.crb_as)
self.axes.plot(self.z_, crb_as[:,0], 'b:')
self.axes.plot(self.z_, crb_as[:,1], 'g:')
self.axes.plot(self.z_, crb_as[:,2], 'r:')
self.canvas.draw()
def _onSize( self, event ):
#self._resizeflag = True
self._SetSize()
def _SetSize( self ):
pixels = tuple( self.GetClientSize() )
self.SetSize( pixels )
self.canvas.SetSize( pixels )
self.figure.set_size_inches( float( pixels[0] )/self.figure.get_dpi(),
float( pixels[1] )/self.figure.get_dpi() )
class PSFTools(HasTraits):
wavelength = Float(700)
NA = Float(1.49)
pupilSize = Float(0)
iterations = Int(50)
view = View(Item('wavelength'),
Item('NA'),
Item('pupilSize'),
Item('iterations'), buttons=[OKButton])
def __init__(self, dsviewer):
self.dsviewer = dsviewer
self.do = dsviewer.do
self.image = dsviewer.image
dsviewer.AddMenuItem('Processing', "Extract &Pupil Function", self.OnExtractPupil)
dsviewer.AddMenuItem('Processing', "Cramer-Rao Bound vs Background ", self.OnCalcCRB3DvsBG)
dsviewer.AddMenuItem('Processing', "PSF Background Correction", self.OnSubtractBackground)
#wx.EVT_MENU(dsviewer, PROC_LABEL, self.OnLabel)
def OnExtractPupil(self, event):
import numpy as np
import pylab
from PYME.PSFGen import fourierHNA
from PYME.DSView.image import ImageStack
from PYME.DSView import ViewIm3D
z_ = np.arange(self.image.data.shape[2])*self.image.mdh['voxelsize.z']*1.e3
z_ -= z_.mean()
self.configure_traits(kind='modal')
pupil = fourierHNA.ExtractPupil(np.maximum(self.image.data[:,:,:] - .001, 0), z_, self.image.mdh['voxelsize.x']*1e3, self.wavelength, self.NA, nIters=self.iterations, size=self.pupilSize)
pylab.figure()
pylab.subplot(121)
pylab.imshow(np.abs(pupil), interpolation='nearest')
pylab.subplot(122)
pylab.imshow(np.angle(pupil)*(np.abs(pupil) > 0), interpolation='nearest')
pupil = pupil*(np.abs(pupil) > 0)
#im = ImageStack([np.abs(pupil), np.angle(pupil)*(np.abs(pupil) > 0)], titleStub = 'Extracted Pupil')
im = ImageStack(pupil, titleStub = 'Extracted Pupil')
im.mdh.copyEntriesFrom(self.image.mdh)
im.mdh['Parent'] = self.image.filename
#im.mdh['Processing.CropROI'] = roi
mode = 'pupil'
dv = ViewIm3D(im, mode=mode, glCanvas=self.dsviewer.glCanvas, parent=wx.GetTopLevelParent(self.dsviewer))
def OnSubtractBackground(self, event):
from PYME.DSView.image import ImageStack
from PYME.DSView import ViewIm3D
from PYME.PSFEst import extractImages
d_bg = extractImages.backgroundCorrectPSFWF(self.image.data[:,:,:])
im = ImageStack(d_bg, titleStub = 'Filtered Image')
im.mdh.copyEntriesFrom(self.image.mdh)
im.mdh['Parent'] = self.image.filename
dv = ViewIm3D(im, mode='psf', glCanvas=self.dsviewer.glCanvas)
def OnCalcCRB(self, event):
#print 'f'
from PYME.Analysis import cramerRao
from PYME.PSFGen import fourierHNA
#print 'b'
import numpy as np
d = self.image.data[:,:,:]
        I = d[:,:,d.shape[2]//2].sum()
vs = 1e3*np.array([self.image.mdh['voxelsize.x'], self.image.mdh['voxelsize.y'],self.image.mdh['voxelsize.z']])
#print 'fi'
FI = cramerRao.CalcFisherInformZn2(d*(2e3/I), 100, voxelsize=vs)
#print 'crb'
crb = cramerRao.CalcCramerReoZ(FI)
#print 'crbd'
import pylab
z_ = np.arange(d.shape[2])*self.image.mdh['voxelsize.z']*1.0e3
z_ = z_ - z_.mean()
print('p')
pylab.figure()
pylab.plot(z_, np.sqrt(crb[:,0]), label='x')
pylab.plot(z_, np.sqrt(crb[:,1]), label='y')
pylab.plot(z_, np.sqrt(crb[:,2]), label='z')
pylab.legend()
pylab.xlabel('Defocus [nm]')
pylab.ylabel('Std. Dev. [nm]')
pylab.title('Cramer-Rao bound for 2000 photons')
ps_as = fourierHNA.GenAstigPSF(z_, vs[0], 2)
        I = ps_as[:,:,ps_as.shape[2]//2].sum()
crb_as = np.sqrt(cramerRao.CalcCramerReoZ(cramerRao.CalcFisherInformZn2(ps_as*2000/I, 500, voxelsize=vs)))
pylab.plot(z_, crb_as[:,0], 'b:')
pylab.plot(z_, crb_as[:,1], 'g:')
pylab.plot(z_, crb_as[:,2], 'r:')
def OnCalcCRB3DvsBG(self, event):
from PYME.Analysis import cramerRao
from PYME.PSFGen import fourierHNA
#print 'b'
import numpy as np
vs = 1e3*np.array([self.image.mdh['voxelsize.x'], self.image.mdh['voxelsize.y'],self.image.mdh['voxelsize.z']])
        zf = self.image.data.shape[2]//2
        dz = int(500/vs[2])
        d = self.image.data[:,:,(zf-dz):(zf + dz + 1)]
        I = d[:,:,d.shape[2]//2].sum()
bgv = np.logspace(-1, 2)
z_ = np.arange(d.shape[2])*vs[2]
z_ = z_ - z_.mean()
ps_as = fourierHNA.GenAstigPSF(z_, vs[0], 2)
Ias = ps_as[:,:,ps_as.shape[2]/2].sum()
crb3D = []
crb3Das = []
for bg in bgv:
FI = cramerRao.CalcFisherInformZn2(d*(2e3/I) + bg, 100, voxelsize=vs)
crb = cramerRao.CalcCramerReoZ(FI)
crb_as = (cramerRao.CalcCramerReoZ(cramerRao.CalcFisherInformZn2(ps_as*2000/Ias + bg, 500, voxelsize=vs)))
crb3D.append(np.sqrt(crb.sum(1)).mean())
crb3Das.append(np.sqrt(crb_as.sum(1)).mean())
import pylab
pylab.figure()
pylab.plot(bgv, crb3Das, label='Theoretical PSF')
pylab.plot(bgv, crb3D, label='Measured PSF')
pylab.legend()
pylab.xlabel('Background [photons]')
pylab.ylabel('Average CRB 3D')
pylab.title('Cramer-Rao bound vs Background')
def Plug(dsviewer):
dsviewer.PSFTools = PSFTools(dsviewer)
if dsviewer.do.ds.shape[2] > 1:
dsviewer.crbv = CRBViewPanel(dsviewer, dsviewer.image)
dsviewer.dataChangeHooks.append(dsviewer.crbv.calcCRB)
dsviewer.psfqp = PSFQualityPanel(dsviewer)
dsviewer.dataChangeHooks.append(dsviewer.psfqp.FillGrid)
#dsviewer.AddPage(dsviewer.psfqp, False, 'PSF Quality')
dsviewer.AddPage(dsviewer.crbv, False, 'Cramer-Rao Bounds')
pinfo1 = aui.AuiPaneInfo().Name("psfQPanel").Left().Caption('PSF Quality').DestroyOnClose(True).CloseButton(False).MinimizeButton(True).MinimizeMode(aui.AUI_MINIMIZE_CAPT_SMART|aui.AUI_MINIMIZE_POS_RIGHT)#.MinimizeButton(True).MinimizeMode(aui.AUI_MINIMIZE_CAPT_SMART|aui.AUI_MINIMIZE_POS_RIGHT)#.CaptionVisible(False)
dsviewer._mgr.AddPane(dsviewer.psfqp, pinfo1)
dsviewer._mgr.Update()
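A hedged aside on the math the CRB methods above rely on: they invert a Fisher information matrix and plot the square root of its diagonal. A minimal standalone sketch of that relation, with toy numbers rather than PYME internals:

import numpy as np

FI = np.diag([4.0, 4.0, 1.0])        # toy 3x3 Fisher information for (x, y, z)
crb = np.linalg.inv(FI)              # Cramer-Rao lower-bound matrix
sigma_bound = np.sqrt(np.diag(crb))  # per-axis std.-dev. bounds, as plotted above
print(sigma_bound)                   # -> [0.5 0.5 1.]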
|
[
"[email protected]"
] | |
2f8c934b78b3d3a3e7e7e52ba27a85a4e8cc7054
|
0760fb4901a75766921a205b55686d6d6f049b30
|
/rllib/utils/tests/test_taskpool.py
|
de0fd4919e05832faff78e2270f197ead660328b
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
ray-project/ray
|
a4bb6940b08b59a61ef0b8e755a52d8563a2f867
|
edba68c3e7cf255d1d6479329f305adb7fa4c3ed
|
refs/heads/master
| 2023-08-31T03:36:48.164405 | 2023-08-31T03:20:38 | 2023-08-31T03:20:38 | 71,932,349 | 29,482 | 5,669 |
Apache-2.0
| 2023-09-14T21:48:14 | 2016-10-25T19:38:30 |
Python
|
UTF-8
|
Python
| false | false | 5,151 |
py
|
import unittest
from unittest.mock import patch
import ray
from ray.rllib.utils.actors import TaskPool
def createMockWorkerAndObjectRef(obj_ref):
return ({obj_ref: 1}, obj_ref)
class TaskPoolTest(unittest.TestCase):
@patch("ray.wait")
def test_completed_prefetch_yieldsAllComplete(self, rayWaitMock):
task1 = createMockWorkerAndObjectRef(1)
task2 = createMockWorkerAndObjectRef(2)
# Return the second task as complete and the first as pending
rayWaitMock.return_value = ([2], [1])
pool = TaskPool()
pool.add(*task1)
pool.add(*task2)
fetched = list(pool.completed_prefetch())
self.assertListEqual(fetched, [task2])
@patch("ray.wait")
def test_completed_prefetch_yieldsAllCompleteUpToDefaultLimit(self, rayWaitMock):
# Load the pool with 1000 tasks, mock them all as complete and then
# check that the first call to completed_prefetch only yields 999
# items and the second call yields the final one
pool = TaskPool()
for i in range(1000):
task = createMockWorkerAndObjectRef(i)
pool.add(*task)
rayWaitMock.return_value = (list(range(1000)), [])
# For this test, we're only checking the object refs
fetched = [pair[1] for pair in pool.completed_prefetch()]
self.assertListEqual(fetched, list(range(999)))
        # Finally, check the next iteration returns the final task
fetched = [pair[1] for pair in pool.completed_prefetch()]
self.assertListEqual(fetched, [999])
@patch("ray.wait")
def test_completed_prefetch_yieldsAllCompleteUpToSpecifiedLimit(self, rayWaitMock):
# Load the pool with 1000 tasks, mock them all as complete and then
        # check that the first call to completed_prefetch only yields 999 items
# and the second call yields the final one
pool = TaskPool()
for i in range(1000):
task = createMockWorkerAndObjectRef(i)
pool.add(*task)
rayWaitMock.return_value = (list(range(1000)), [])
# Verify that only the first 500 tasks are returned, this should leave
# some tasks in the _fetching deque for later
fetched = [pair[1] for pair in pool.completed_prefetch(max_yield=500)]
self.assertListEqual(fetched, list(range(500)))
# Finally, check the next iteration returns the remaining tasks
fetched = [pair[1] for pair in pool.completed_prefetch()]
self.assertListEqual(fetched, list(range(500, 1000)))
@patch("ray.wait")
def test_completed_prefetch_yieldsRemainingIfIterationStops(self, rayWaitMock):
# Test for issue #7106
# In versions of Ray up to 0.8.1, if the pre-fetch generator failed to
# run to completion, then the TaskPool would fail to clear up already
# fetched tasks resulting in stale object refs being returned
pool = TaskPool()
for i in range(10):
task = createMockWorkerAndObjectRef(i)
pool.add(*task)
rayWaitMock.return_value = (list(range(10)), [])
# This should fetch just the first item in the list
try:
for _ in pool.completed_prefetch():
# Simulate a worker failure returned by ray.get()
raise ray.exceptions.RayError
except ray.exceptions.RayError:
pass
# This fetch should return the remaining pre-fetched tasks
fetched = [pair[1] for pair in pool.completed_prefetch()]
self.assertListEqual(fetched, list(range(1, 10)))
@patch("ray.wait")
def test_reset_workers_pendingFetchesFromFailedWorkersRemoved(self, rayWaitMock):
pool = TaskPool()
# We need to hold onto the tasks for this test so that we can fail a
# specific worker
tasks = []
for i in range(10):
task = createMockWorkerAndObjectRef(i)
pool.add(*task)
tasks.append(task)
# Simulate only some of the work being complete and fetch a couple of
# tasks in order to fill the fetching queue
rayWaitMock.return_value = ([0, 1, 2, 3, 4, 5], [6, 7, 8, 9])
fetched = [pair[1] for pair in pool.completed_prefetch(max_yield=2)]
# As we still have some pending tasks, we need to update the
# completion states to remove the completed tasks
rayWaitMock.return_value = ([], [6, 7, 8, 9])
pool.reset_workers(
[
tasks[0][0],
tasks[1][0],
tasks[2][0],
tasks[3][0],
# OH NO! WORKER 4 HAS CRASHED!
tasks[5][0],
tasks[6][0],
tasks[7][0],
tasks[8][0],
tasks[9][0],
]
)
# Fetch the remaining tasks which should already be in the _fetching
# queue
fetched = [pair[1] for pair in pool.completed_prefetch()]
self.assertListEqual(fetched, [2, 3, 5])
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
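For context, a minimal hedged sketch of the consumption pattern these tests exercise. Only TaskPool's add/completed_prefetch API is taken from the tests above; the worker handles and their sample.remote() call are hypothetical stand-ins:

import ray
from ray.rllib.utils.actors import TaskPool

pool = TaskPool()
# for worker in workers:                        # hypothetical actor handles
#     pool.add(worker, worker.sample.remote())
results = []
for worker, obj_ref in pool.completed_prefetch(max_yield=100):
    results.append(ray.get(obj_ref))            # drain completed refs in batches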
|
[
"[email protected]"
] | |
3838bf7b8309dce816bfa0285dca3bdb5173f0a0
|
3a426d6cd831183fa00a22e426da44692c870f0c
|
/sidekick-seq/sidekick/seq/util.py
|
670f4e19ab6178bea6335d246611acef2e759a05
|
[
"MIT"
] |
permissive
|
fabiommendes/sidekick
|
d399d57f13ae606a99623af22c63a32343d66592
|
993ae7b8496347ad9720d3ff11e10ab946c3a800
|
refs/heads/master
| 2021-07-09T15:48:04.113881 | 2021-06-28T16:44:21 | 2021-06-28T16:44:56 | 98,376,293 | 32 | 5 | null | null | null | null |
UTF-8
|
Python
| false | false | 797 |
py
|
import itertools
from ..typing import Index
INDEX_DOC = """index:
Either a number that starts an infinite enumeration or a sequence
of indexes that is passed as the first argument to func."""
def to_index_seq(index: Index):
"""
Convert the index argument of many functions to a proper sequence.
"""
if index is False or index is None:
return None
elif index is True:
return itertools.count(0)
elif isinstance(index, int):
return itertools.count(index)
else:
return index
def vargs(args):
"""
Conform function args to a sequence of sequences.
"""
n = len(args)
if n == 1:
return args[0]
elif n == 0:
raise TypeError("no arguments given")
else:
return args
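A short illustration of how the two helpers normalize their inputs, assuming the module is importable as sidekick.seq.util (per its path):

from sidekick.seq.util import to_index_seq, vargs

assert to_index_seq(None) is None               # indexing disabled
assert next(to_index_seq(True)) == 0            # True -> itertools.count(0)
assert next(to_index_seq(5)) == 5               # int -> itertools.count(5)
assert to_index_seq(range(3)) == range(3)       # sequences pass through unchanged

assert vargs(([1, 2], [3, 4])) == ([1, 2], [3, 4])  # several args: kept as-is
assert vargs(([1, 2],)) == [1, 2]                   # single arg: unwrapped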
|
[
"[email protected]"
] | |
e5de69aeef1912a706f33d248d0f5177a7659fe7
|
62c6884e9597d96a25d274515d6124c46daffec8
|
/examples/reports/__init__.py
|
0984649742c7f11520b17547eaebd313405fb49e
|
[
"MIT"
] |
permissive
|
doncat99/zvt
|
0f9305442af287e63f15de11cb2e2f6b5f9b3d05
|
831183bdf7a6d0fc3acd3ea51984df590078eec6
|
refs/heads/master
| 2023-03-22T13:35:17.277276 | 2021-03-10T14:02:08 | 2021-03-10T14:02:08 | 284,984,720 | 13 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,753 |
py
|
# -*- coding: utf-8 -*-
import datetime
import json
import os
from sqlalchemy import or_
from zvt.api.data_type import Region
from zvt.utils.pd_utils import pd_is_not_null
from zvt.utils.time_utils import to_pd_timestamp, now_time_str
from zvt.domain import FinanceFactor, BalanceSheet, IncomeStatement
def get_subscriber_emails():
emails_file = os.path.abspath(os.path.join(os.path.dirname(__file__), 'subscriber_emails.json'))
with open(emails_file) as f:
return json.load(f)
def risky_company(the_date=None, income_yoy=-0.1, profit_yoy=-0.1, entity_ids=None):
    # Resolve the default date per call rather than once at import time.
    if the_date is None:
        the_date = to_pd_timestamp(now_time_str())
    codes = []
    start_timestamp = to_pd_timestamp(the_date) - datetime.timedelta(130)
    # Falling revenue, falling profit, low current ratio, low quick ratio
finance_filter = or_(FinanceFactor.op_income_growth_yoy < income_yoy,
FinanceFactor.net_profit_growth_yoy <= profit_yoy,
FinanceFactor.current_ratio < 0.7,
FinanceFactor.quick_ratio < 0.5)
df = FinanceFactor.query_data(region=Region.CHN, entity_ids=entity_ids, start_timestamp=start_timestamp, filters=[finance_filter],
columns=['code'])
if pd_is_not_null(df):
codes = codes + df.code.tolist()
    # High receivables, high inventories, high goodwill
balance_filter = (BalanceSheet.accounts_receivable + BalanceSheet.inventories + BalanceSheet.goodwill) > BalanceSheet.total_equity
df = BalanceSheet.query_data(region=Region.CHN, entity_ids=entity_ids, start_timestamp=start_timestamp, filters=[balance_filter],
columns=['code'])
if pd_is_not_null(df):
codes = codes + df.code.tolist()
    # Receivables > net profit / 2
df1 = BalanceSheet.query_data(region=Region.CHN, entity_ids=entity_ids, start_timestamp=start_timestamp,
columns=[BalanceSheet.code, BalanceSheet.accounts_receivable])
if pd_is_not_null(df1):
df1.drop_duplicates(subset='code', keep='last', inplace=True)
df1 = df1.set_index('code', drop=True).sort_index()
df2 = IncomeStatement.query_data(region=Region.CHN, entity_ids=entity_ids, start_timestamp=start_timestamp,
columns=[IncomeStatement.code,
IncomeStatement.net_profit])
if pd_is_not_null(df2):
df2.drop_duplicates(subset='code', keep='last', inplace=True)
df2 = df2.set_index('code', drop=True).sort_index()
if pd_is_not_null(df1) and pd_is_not_null(df2):
codes = codes + df1[df1.accounts_receivable > df2.net_profit / 2].index.tolist()
return list(set(codes))
if __name__ == '__main__':
print(get_subscriber_emails())
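A hedged usage sketch of the screen above; it assumes zvt's fundamental data has already been recorded locally, and the entity-id format shown is an assumption about zvt's id scheme, not something taken from this file:

risky = risky_company(entity_ids=['stock_sz_000001', 'stock_sz_000002'])
print(risky)   # codes flagged by any of the three balance/income screens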
|
[
"[email protected]"
] | |
cafb2d5199dac51c6af5f55849443b25a12c4d81
|
1816378da612c7db376934b033e4fd64951338b6
|
/gui/services/migrations/0102_auto__add_field_nfs_nfs_srv_udp.py
|
f1b89b4f6ff1d7c96b614481aab07f36078610ad
|
[] |
no_license
|
quater/freenas-9.2-xen
|
46517a7a23546764347d3c91108c70a8bd648ec6
|
96e580055fa97575f0a0cb23a72495860467bcfb
|
refs/heads/master
| 2021-01-16T22:21:38.781962 | 2014-02-07T05:59:13 | 2014-02-07T05:59:13 | 16,609,785 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 33,441 |
py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
# Adding field 'NFS.nfs_srv_udp'
db.add_column(u'services_nfs', 'nfs_srv_udp',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
orm['services.NFS'].objects.update(nfs_srv_udp=False)
def backwards(self, orm):
# Deleting field 'NFS.nfs_srv_udp'
db.delete_column(u'services_nfs', 'nfs_srv_udp')
models = {
u'services.activedirectory': {
'Meta': {'object_name': 'ActiveDirectory'},
'ad_allow_trusted_doms': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ad_bindname': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'ad_bindpw': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'ad_dcname': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'ad_dns_timeout': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'ad_domainname': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'ad_gcname': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'ad_keytab': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'ad_kpwdname': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'ad_krbname': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'ad_netbiosname': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'ad_timeout': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'ad_unix_extensions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ad_use_default_domain': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'ad_use_keytab': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ad_verbose_logging': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ad_workgroup': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'services.afp': {
'Meta': {'object_name': 'AFP'},
'afp_srv_connections_limit': ('django.db.models.fields.IntegerField', [], {'default': '50', 'max_length': '120'}),
'afp_srv_guest': ('django.db.models.fields.BooleanField', [], {}),
'afp_srv_guest_user': ('freenasUI.freeadmin.models.fields.UserField', [], {'default': "'nobody'", 'max_length': '120'}),
'afp_srv_homedir': ('freenasUI.freeadmin.models.fields.PathField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'afp_srv_homedir_enable': ('django.db.models.fields.BooleanField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'services.cifs': {
'Meta': {'object_name': 'CIFS'},
'cifs_srv_aio_enable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'cifs_srv_aio_rs': ('django.db.models.fields.IntegerField', [], {'default': '4096', 'max_length': '120'}),
'cifs_srv_aio_ws': ('django.db.models.fields.IntegerField', [], {'default': '4096', 'max_length': '120'}),
'cifs_srv_authmodel': ('django.db.models.fields.CharField', [], {'default': "'user'", 'max_length': '10'}),
'cifs_srv_description': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'cifs_srv_dirmask': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'cifs_srv_dosattr': ('django.db.models.fields.BooleanField', [], {}),
'cifs_srv_doscharset': ('django.db.models.fields.CharField', [], {'default': "'CP437'", 'max_length': '120'}),
'cifs_srv_easupport': ('django.db.models.fields.BooleanField', [], {}),
'cifs_srv_filemask': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'cifs_srv_guest': ('freenasUI.freeadmin.models.fields.UserField', [], {'default': "'nobody'", 'max_length': '120'}),
'cifs_srv_homedir': ('freenasUI.freeadmin.models.fields.PathField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'cifs_srv_homedir_aux': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'cifs_srv_homedir_browseable_enable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'cifs_srv_homedir_enable': ('django.db.models.fields.BooleanField', [], {}),
'cifs_srv_hostlookup': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'cifs_srv_localmaster': ('django.db.models.fields.BooleanField', [], {}),
'cifs_srv_loglevel': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '120'}),
'cifs_srv_netbiosname': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'cifs_srv_nullpw': ('django.db.models.fields.BooleanField', [], {}),
'cifs_srv_smb_options': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'cifs_srv_timeserver': ('django.db.models.fields.BooleanField', [], {}),
'cifs_srv_unixcharset': ('django.db.models.fields.CharField', [], {'default': "'UTF-8'", 'max_length': '120'}),
'cifs_srv_unixext': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'cifs_srv_workgroup': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'cifs_srv_zeroconf': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'services.domaincontroller': {
'Meta': {'object_name': 'DomainController'},
'dc_dns_backend': ('django.db.models.fields.CharField', [], {'default': "'SAMBA_INTERNAL'", 'max_length': '120'}),
'dc_dns_forwarder': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'dc_domain': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'dc_forest_level': ('django.db.models.fields.CharField', [], {'default': "'2003'", 'max_length': '120'}),
'dc_passwd': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'dc_realm': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'dc_role': ('django.db.models.fields.CharField', [], {'default': "'dc'", 'max_length': '120'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'services.dynamicdns': {
'Meta': {'object_name': 'DynamicDNS'},
'ddns_domain': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'ddns_fupdateperiod': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'ddns_options': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'ddns_password': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'ddns_provider': ('django.db.models.fields.CharField', [], {'default': "'[email protected]'", 'max_length': '120', 'blank': 'True'}),
'ddns_updateperiod': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'ddns_username': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'services.ftp': {
'Meta': {'object_name': 'FTP'},
'ftp_anonpath': ('freenasUI.freeadmin.models.fields.PathField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'ftp_anonuserbw': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'ftp_anonuserdlbw': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'ftp_banner': ('django.db.models.fields.TextField', [], {'max_length': '120', 'blank': 'True'}),
'ftp_clients': ('django.db.models.fields.PositiveIntegerField', [], {'default': '32'}),
'ftp_defaultroot': ('django.db.models.fields.BooleanField', [], {}),
'ftp_dirmask': ('django.db.models.fields.CharField', [], {'default': "'077'", 'max_length': '3'}),
'ftp_filemask': ('django.db.models.fields.CharField', [], {'default': "'077'", 'max_length': '3'}),
'ftp_fxp': ('django.db.models.fields.BooleanField', [], {}),
'ftp_ident': ('django.db.models.fields.BooleanField', [], {}),
'ftp_ipconnections': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'ftp_localuserbw': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'ftp_localuserdlbw': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'ftp_loginattempt': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3'}),
'ftp_masqaddress': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'ftp_onlyanonymous': ('django.db.models.fields.BooleanField', [], {}),
'ftp_onlylocal': ('django.db.models.fields.BooleanField', [], {}),
'ftp_options': ('django.db.models.fields.TextField', [], {'max_length': '120', 'blank': 'True'}),
'ftp_passiveportsmax': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'ftp_passiveportsmin': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'ftp_port': ('django.db.models.fields.PositiveIntegerField', [], {'default': '21'}),
'ftp_resume': ('django.db.models.fields.BooleanField', [], {}),
'ftp_reversedns': ('django.db.models.fields.BooleanField', [], {}),
'ftp_rootlogin': ('django.db.models.fields.BooleanField', [], {}),
'ftp_ssltls_certfile': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'ftp_timeout': ('django.db.models.fields.PositiveIntegerField', [], {'default': '120'}),
'ftp_tls': ('django.db.models.fields.BooleanField', [], {}),
'ftp_tls_opt_allow_client_renegotiations': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ftp_tls_opt_allow_dot_login': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ftp_tls_opt_allow_per_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ftp_tls_opt_common_name_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ftp_tls_opt_dns_name_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ftp_tls_opt_enable_diags': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ftp_tls_opt_export_cert_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ftp_tls_opt_ip_address_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ftp_tls_opt_no_cert_request': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ftp_tls_opt_no_empty_fragments': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ftp_tls_opt_no_session_reuse_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ftp_tls_opt_stdenvvars': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ftp_tls_opt_use_implicit_ssl': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ftp_tls_policy': ('django.db.models.fields.CharField', [], {'default': "'on'", 'max_length': '120'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'services.iscsitarget': {
'Meta': {'ordering': "['iscsi_target_name']", 'object_name': 'iSCSITarget'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iscsi_target_alias': ('django.db.models.fields.CharField', [], {'max_length': '120', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'iscsi_target_authgroup': ('django.db.models.fields.IntegerField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'iscsi_target_authtype': ('django.db.models.fields.CharField', [], {'default': "'Auto'", 'max_length': '120'}),
'iscsi_target_flags': ('django.db.models.fields.CharField', [], {'default': "'rw'", 'max_length': '120'}),
'iscsi_target_initialdigest': ('django.db.models.fields.CharField', [], {'default': "'Auto'", 'max_length': '120'}),
'iscsi_target_initiatorgroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['services.iSCSITargetAuthorizedInitiator']"}),
'iscsi_target_logical_blocksize': ('django.db.models.fields.IntegerField', [], {'default': '512', 'max_length': '3'}),
'iscsi_target_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '120'}),
'iscsi_target_portalgroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['services.iSCSITargetPortal']"}),
'iscsi_target_queue_depth': ('django.db.models.fields.IntegerField', [], {'default': '32', 'max_length': '3'}),
'iscsi_target_serial': ('django.db.models.fields.CharField', [], {'default': "'10000001'", 'max_length': '16'}),
'iscsi_target_type': ('django.db.models.fields.CharField', [], {'default': "'Disk'", 'max_length': '120'})
},
u'services.iscsitargetauthcredential': {
'Meta': {'object_name': 'iSCSITargetAuthCredential'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iscsi_target_auth_peersecret': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'iscsi_target_auth_peeruser': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'iscsi_target_auth_secret': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'iscsi_target_auth_tag': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'iscsi_target_auth_user': ('django.db.models.fields.CharField', [], {'max_length': '120'})
},
u'services.iscsitargetauthorizedinitiator': {
'Meta': {'object_name': 'iSCSITargetAuthorizedInitiator'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iscsi_target_initiator_auth_network': ('django.db.models.fields.TextField', [], {'default': "'ALL'", 'max_length': '2048'}),
'iscsi_target_initiator_comment': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'iscsi_target_initiator_initiators': ('django.db.models.fields.TextField', [], {'default': "'ALL'", 'max_length': '2048'}),
'iscsi_target_initiator_tag': ('django.db.models.fields.IntegerField', [], {'default': '1', 'unique': 'True'})
},
u'services.iscsitargetextent': {
'Meta': {'ordering': "['iscsi_target_extent_name']", 'object_name': 'iSCSITargetExtent'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iscsi_target_extent_comment': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'iscsi_target_extent_filesize': ('django.db.models.fields.CharField', [], {'default': '0', 'max_length': '120'}),
'iscsi_target_extent_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '120'}),
'iscsi_target_extent_path': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'iscsi_target_extent_type': ('django.db.models.fields.CharField', [], {'max_length': '120'})
},
u'services.iscsitargetglobalconfiguration': {
'Meta': {'object_name': 'iSCSITargetGlobalConfiguration'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iscsi_basename': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'iscsi_defaultt2r': ('django.db.models.fields.IntegerField', [], {'default': '60', 'max_length': '120'}),
'iscsi_defaultt2w': ('django.db.models.fields.IntegerField', [], {'default': '2', 'max_length': '120'}),
'iscsi_discoveryauthgroup': ('django.db.models.fields.IntegerField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'iscsi_discoveryauthmethod': ('django.db.models.fields.CharField', [], {'default': "'Auto'", 'max_length': '120'}),
'iscsi_firstburst': ('django.db.models.fields.IntegerField', [], {'default': '65536', 'max_length': '120'}),
'iscsi_iotimeout': ('django.db.models.fields.IntegerField', [], {'default': '30', 'max_length': '120'}),
'iscsi_luc_authgroup': ('django.db.models.fields.IntegerField', [], {'max_length': '120', 'null': 'True', 'blank': 'True'}),
'iscsi_luc_authmethod': ('django.db.models.fields.CharField', [], {'default': "'CHAP'", 'max_length': '120', 'blank': 'True'}),
'iscsi_luc_authnetwork': ('django.db.models.fields.CharField', [], {'default': "'127.0.0.0/8'", 'max_length': '120', 'blank': 'True'}),
'iscsi_lucip': ('django.db.models.fields.IPAddressField', [], {'default': "'127.0.0.1'", 'max_length': '15', 'null': 'True', 'blank': 'True'}),
'iscsi_lucport': ('django.db.models.fields.IntegerField', [], {'default': '3261', 'null': 'True', 'blank': 'True'}),
'iscsi_maxburst': ('django.db.models.fields.IntegerField', [], {'default': '262144', 'max_length': '120'}),
'iscsi_maxconnect': ('django.db.models.fields.IntegerField', [], {'default': '8', 'max_length': '120'}),
'iscsi_maxoutstandingr2t': ('django.db.models.fields.IntegerField', [], {'default': '16', 'max_length': '120'}),
'iscsi_maxrecdata': ('django.db.models.fields.IntegerField', [], {'default': '262144', 'max_length': '120'}),
'iscsi_maxsesh': ('django.db.models.fields.IntegerField', [], {'default': '16', 'max_length': '120'}),
'iscsi_nopinint': ('django.db.models.fields.IntegerField', [], {'default': '20', 'max_length': '120'}),
'iscsi_r2t': ('django.db.models.fields.IntegerField', [], {'default': '32', 'max_length': '120'}),
'iscsi_toggleluc': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'services.iscsitargetportal': {
'Meta': {'object_name': 'iSCSITargetPortal'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iscsi_target_portal_comment': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'iscsi_target_portal_tag': ('django.db.models.fields.IntegerField', [], {'default': '1', 'max_length': '120'})
},
u'services.iscsitargetportalip': {
'Meta': {'unique_together': "(('iscsi_target_portalip_ip', 'iscsi_target_portalip_port'),)", 'object_name': 'iSCSITargetPortalIP'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iscsi_target_portalip_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'iscsi_target_portalip_port': ('django.db.models.fields.SmallIntegerField', [], {'default': '3260'}),
'iscsi_target_portalip_portal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ips'", 'to': u"orm['services.iSCSITargetPortal']"})
},
u'services.iscsitargettoextent': {
'Meta': {'object_name': 'iSCSITargetToExtent'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iscsi_extent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['services.iSCSITargetExtent']", 'unique': 'True'}),
'iscsi_target': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['services.iSCSITarget']"})
},
u'services.ldap': {
'Meta': {'object_name': 'LDAP'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ldap_anonbind': ('django.db.models.fields.BooleanField', [], {}),
'ldap_basedn': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'ldap_groupsuffix': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'ldap_hostname': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'ldap_machinesuffix': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'ldap_options': ('django.db.models.fields.TextField', [], {'max_length': '120', 'blank': 'True'}),
'ldap_passwordsuffix': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'ldap_pwencryption': ('django.db.models.fields.CharField', [], {'default': "'clear'", 'max_length': '120'}),
'ldap_rootbasedn': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'ldap_rootbindpw': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'ldap_ssl': ('django.db.models.fields.CharField', [], {'default': "'off'", 'max_length': '120'}),
'ldap_tls_cacertfile': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'ldap_usersuffix': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'})
},
u'services.nfs': {
'Meta': {'object_name': 'NFS'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nfs_srv_allow_nonroot': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'nfs_srv_bindip': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'nfs_srv_mountd_port': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'nfs_srv_rpclockd_port': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'nfs_srv_rpcstatd_port': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'nfs_srv_servers': ('django.db.models.fields.PositiveIntegerField', [], {'default': '4'}),
'nfs_srv_udp': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'services.nis': {
'Meta': {'object_name': 'NIS'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nis_domain': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'nis_manycast': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'nis_secure_mode': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'nis_servers': ('django.db.models.fields.CharField', [], {'max_length': '8192', 'blank': 'True'})
},
u'services.nt4': {
'Meta': {'object_name': 'NT4'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nt4_adminname': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'nt4_adminpw': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'nt4_dcname': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'nt4_netbiosname': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'nt4_workgroup': ('django.db.models.fields.CharField', [], {'max_length': '120'})
},
u'services.rpctoken': {
'Meta': {'object_name': 'RPCToken'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
},
u'services.rsyncd': {
'Meta': {'object_name': 'Rsyncd'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rsyncd_auxiliary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'rsyncd_port': ('django.db.models.fields.IntegerField', [], {'default': '873'})
},
u'services.rsyncmod': {
'Meta': {'ordering': "['rsyncmod_name']", 'object_name': 'RsyncMod'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rsyncmod_auxiliary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'rsyncmod_comment': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'rsyncmod_group': ('freenasUI.freeadmin.models.fields.GroupField', [], {'default': "'nobody'", 'max_length': '120', 'blank': 'True'}),
'rsyncmod_hostsallow': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'rsyncmod_hostsdeny': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'rsyncmod_maxconn': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'rsyncmod_mode': ('django.db.models.fields.CharField', [], {'default': "'rw'", 'max_length': '120'}),
'rsyncmod_name': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'rsyncmod_path': ('freenasUI.freeadmin.models.fields.PathField', [], {'max_length': '255'}),
'rsyncmod_user': ('freenasUI.freeadmin.models.fields.UserField', [], {'default': "'nobody'", 'max_length': '120', 'blank': 'True'})
},
u'services.services': {
'Meta': {'object_name': 'services'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'srv_enable': ('django.db.models.fields.BooleanField', [], {}),
'srv_service': ('django.db.models.fields.CharField', [], {'max_length': '120'})
},
u'services.smart': {
'Meta': {'object_name': 'SMART'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'smart_critical': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'smart_difference': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'smart_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'smart_informational': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'smart_interval': ('django.db.models.fields.IntegerField', [], {'default': '30'}),
'smart_powermode': ('django.db.models.fields.CharField', [], {'default': "'never'", 'max_length': '60'})
},
u'services.snmp': {
'Meta': {'object_name': 'SNMP'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'snmp_community': ('django.db.models.fields.CharField', [], {'default': "'public'", 'max_length': '120'}),
'snmp_contact': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'snmp_location': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'snmp_options': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'snmp_traps': ('django.db.models.fields.BooleanField', [], {})
},
u'services.ssh': {
'Meta': {'object_name': 'SSH'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ssh_compression': ('django.db.models.fields.BooleanField', [], {}),
'ssh_host_dsa_key': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'ssh_host_dsa_key_pub': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'ssh_host_ecdsa_key': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'ssh_host_ecdsa_key_pub': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'ssh_host_key': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'ssh_host_key_pub': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'ssh_host_rsa_key': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'ssh_host_rsa_key_pub': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'ssh_options': ('django.db.models.fields.TextField', [], {'max_length': '120', 'blank': 'True'}),
'ssh_passwordauth': ('django.db.models.fields.BooleanField', [], {}),
'ssh_privatekey': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'blank': 'True'}),
'ssh_rootlogin': ('django.db.models.fields.BooleanField', [], {}),
'ssh_sftp_log_facility': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'ssh_sftp_log_level': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'ssh_tcpfwd': ('django.db.models.fields.BooleanField', [], {}),
'ssh_tcpport': ('django.db.models.fields.PositiveIntegerField', [], {'default': '22'})
},
u'services.tftp': {
'Meta': {'object_name': 'TFTP'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tftp_directory': ('freenasUI.freeadmin.models.fields.PathField', [], {'max_length': '255'}),
'tftp_newfiles': ('django.db.models.fields.BooleanField', [], {}),
'tftp_options': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'tftp_port': ('django.db.models.fields.PositiveIntegerField', [], {'default': '69'}),
'tftp_umask': ('django.db.models.fields.CharField', [], {'default': "'022'", 'max_length': '120'}),
'tftp_username': ('freenasUI.freeadmin.models.fields.UserField', [], {'default': "'nobody'", 'max_length': '120'})
},
u'services.ups': {
'Meta': {'object_name': 'UPS'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ups_description': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'ups_driver': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'ups_emailnotify': ('django.db.models.fields.BooleanField', [], {}),
'ups_extrausers': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'ups_identifier': ('django.db.models.fields.CharField', [], {'default': "'ups'", 'max_length': '120'}),
'ups_mode': ('django.db.models.fields.CharField', [], {'default': "'master'", 'max_length': '6'}),
'ups_monpwd': ('django.db.models.fields.CharField', [], {'default': "'fixmepass'", 'max_length': '30'}),
'ups_monuser': ('django.db.models.fields.CharField', [], {'default': "'upsmon'", 'max_length': '50'}),
'ups_options': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'ups_port': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'}),
'ups_remotehost': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'ups_remoteport': ('django.db.models.fields.IntegerField', [], {'default': '3493', 'blank': 'True'}),
'ups_rmonitor': ('django.db.models.fields.BooleanField', [], {}),
'ups_shutdown': ('django.db.models.fields.CharField', [], {'default': "'batt'", 'max_length': '120'}),
'ups_shutdowntimer': ('django.db.models.fields.IntegerField', [], {'default': '30'}),
'ups_subject': ('django.db.models.fields.CharField', [], {'default': "'UPS report generated by %h'", 'max_length': '120'}),
'ups_toemail': ('django.db.models.fields.CharField', [], {'max_length': '120', 'blank': 'True'})
}
}
complete_apps = ['services']
|
[
"[email protected]"
] | |
9846bc35bfa3391fc47e58f9a2879889ab9fa42e
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/17/usersdata/129/7024/submittedfiles/lecker.py
|
41f8857dcf14a6238bfb49ef35648401bdecabca
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 428 |
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import math
a = float(input('Enter a value: '))
b = float(input('Enter a value: '))
c = float(input('Enter a value: '))
d = float(input('Enter a value: '))
# note: a chained comparison like a<b>c means (a<b) and (b>c)
if a>b and a<b>c:
print ('N')
elif a>b and b<c>d :
print ('N')
elif a>b and c<d:
print ('N')
elif a<b>c and b<c>d:
print ('N')
elif a<b>c and c<d:
print ('N')
elif b<c>d and c<d:
print ('N')
else:
print ('S')
|
[
"[email protected]"
] | |
5e28c0093f7c78ce2b10fae07e900c56f374c650
|
83de24182a7af33c43ee340b57755e73275149ae
|
/aliyun-python-sdk-iot/aliyunsdkiot/request/v20180120/UpdateEdgeInstanceRequest.py
|
c43811f3b4e4b5c6c68fbb0615a7607275f92b02
|
[
"Apache-2.0"
] |
permissive
|
aliyun/aliyun-openapi-python-sdk
|
4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f
|
83fd547946fd6772cf26f338d9653f4316c81d3c
|
refs/heads/master
| 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 |
NOASSERTION
| 2023-09-14T08:51:06 | 2015-07-23T09:39:45 |
Python
|
UTF-8
|
Python
| false | false | 2,204 |
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkiot.endpoint import endpoint_data
class UpdateEdgeInstanceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Iot', '2018-01-20', 'UpdateEdgeInstance')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_BizEnable(self):
return self.get_query_params().get('BizEnable')
def set_BizEnable(self,BizEnable):
self.add_query_param('BizEnable',BizEnable)
def get_Spec(self):
return self.get_query_params().get('Spec')
def set_Spec(self,Spec):
self.add_query_param('Spec',Spec)
def get_IotInstanceId(self):
return self.get_query_params().get('IotInstanceId')
def set_IotInstanceId(self,IotInstanceId):
self.add_query_param('IotInstanceId',IotInstanceId)
def get_Tags(self):
return self.get_query_params().get('Tags')
def set_Tags(self,Tags):
self.add_query_param('Tags',Tags)
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId)
def get_Name(self):
return self.get_query_params().get('Name')
def set_Name(self,Name):
self.add_query_param('Name',Name)
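A minimal hedged sketch of sending this request with the core SDK client; the credentials, region, and instance values are placeholders:

from aliyunsdkcore.client import AcsClient
from aliyunsdkiot.request.v20180120.UpdateEdgeInstanceRequest import UpdateEdgeInstanceRequest

client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-shanghai')
request = UpdateEdgeInstanceRequest()
request.set_InstanceId('<edge-instance-id>')
request.set_Name('my-edge-instance')
response = client.do_action_with_exception(request)   # raises on API errors
print(response)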
|
[
"[email protected]"
] | |
eb25e9c4b639d7a0427e13c3c6e14a6bbfec3069
|
dac12c9178b13d60f401c4febff5569af8aa2719
|
/cvat/apps/iam/apps.py
|
4f6979b7a7c54e461d2602f06d537cb0802cec40
|
[
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later"
] |
permissive
|
opencv/cvat
|
39dc66ca20f972ba40b79c44d7ce43590dc0b0b5
|
899c9fd75146744def061efd7ab1b1c6c9f6942f
|
refs/heads/develop
| 2023-08-19T04:27:56.974498 | 2023-08-18T09:58:25 | 2023-08-18T09:58:25 | 139,156,354 | 6,558 | 1,887 |
MIT
| 2023-09-14T12:44:39 | 2018-06-29T14:02:45 |
TypeScript
|
UTF-8
|
Python
| false | false | 368 |
py
|
from distutils.util import strtobool
import os
from django.apps import AppConfig
from .utils import create_opa_bundle
class IAMConfig(AppConfig):
name = 'cvat.apps.iam'
def ready(self):
from .signals import register_signals
register_signals(self)
if strtobool(os.environ.get("IAM_OPA_BUNDLE", '0')):
create_opa_bundle()
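For orientation, a hedged sketch of how this config gets wired in: Django instantiates IAMConfig when the app is listed in settings, and ready() only builds the OPA bundle when the environment opts in. The settings fragment below is illustrative, not taken from CVAT's actual settings files:

# settings.py (illustrative)
INSTALLED_APPS = [
    # ...
    'cvat.apps.iam',   # Django discovers IAMConfig automatically
]
# With IAM_OPA_BUNDLE=1 in the environment, ready() calls create_opa_bundle().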
|
[
"[email protected]"
] | |
780ce0fba2444e5a07ea67d6ed14d86a19ea2e4d
|
ec181b840d3462eb43de5682adde38fa3c0ab570
|
/towhee/compiler/backends/__init__.py
|
03b640a4d7eb5f6b90d404c245eb933791a56562
|
[
"Apache-2.0"
] |
permissive
|
towhee-io/towhee-compiler
|
37fc26ec87fc20710d2e1b653b2d83fad0dfc63f
|
e9a724169ae96d3ae73db732ae3d8b4e9e3f9b5c
|
refs/heads/main
| 2023-05-23T07:59:11.217347 | 2022-09-13T11:32:23 | 2022-09-13T11:32:23 | 514,104,716 | 6 | 6 |
Apache-2.0
| 2022-09-13T11:32:24 | 2022-07-15T02:10:13 |
Python
|
UTF-8
|
Python
| false | false | 416 |
py
|
from typing import Callable
from .backend_compiler import BackendCompiler
from .nebullvm_compiler import NebullvmCompiler
def resolve(name: str) -> Callable:
if name in BackendCompiler.backends:
return BackendCompiler.backends[name]()
from torchdynamo.optimizations.backends import BACKENDS
return BACKENDS[name]
__all__ = [
"BackendCompiler",
"NebullvmCompiler",
"resolve",
]
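A hedged usage sketch of resolve(): names registered on BackendCompiler.backends are instantiated, and anything else falls through to torchdynamo's BACKENDS registry. The 'nebullvm' key is an assumption about what NebullvmCompiler registers itself under:

compiler = resolve('nebullvm')   # instantiates the registered BackendCompiler
# an unregistered name would instead be looked up in torchdynamo's BACKENDS dict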
|
[
"[email protected]"
] | |
85d78ffc916b78fe38f01989bed8b03fcf69acd7
|
e61e664d95af3b93150cda5b92695be6551d2a7c
|
/vega/core/search_space/__init__.py
|
62c21622ed68be03022e38e7e52bef05102fab74
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
huawei-noah/vega
|
44aaf8bb28b45f707ed6cd4e871ba70fc0c04846
|
12e37a1991eb6771a2999fe0a46ddda920c47948
|
refs/heads/master
| 2023-09-01T20:16:28.746745 | 2023-02-15T09:36:59 | 2023-02-15T09:36:59 | 273,667,533 | 850 | 184 |
NOASSERTION
| 2023-02-15T09:37:01 | 2020-06-20T08:20:06 |
Python
|
UTF-8
|
Python
| false | false | 503 |
py
|
from vega.core.search_space.ext_hyper_parameter import IntHyperParameter, FloatHyperParameter, \
FloatExpHyperParameter, IntExpHyperParameter, CatHyperParameter, BoolCatHyperParameter, \
AdjacencyListHyperParameter, BinaryCodeHyperParameter, HalfCodeHyperParameter
from .search_space import SearchSpace, SpaceSet
from .condition_types import ConditionTypes, CONDITION_TYPE_MAP
from .ext_conditions import EqualCondition, NotEqualCondition, InCondition
from .range_generator import AdjacencyList
|
[
"[email protected]"
] | |
20367adb74f550000ac708a28b1b02c267317161
|
ac4ba3868bb87c995772a293360e7cc4e38a3ccc
|
/one_model_chooser_svm.py
|
556e321e0d5d3401ea464633f255dce33b9035b7
|
[] |
no_license
|
greggoren/robustness
|
8d503370f72c91882e205cd7af00d727997a7906
|
d9328bb86d66e4767b6d998125c8ef5a5c540c5e
|
refs/heads/master
| 2021-01-15T13:33:49.425635 | 2018-01-29T20:47:54 | 2018-01-29T20:47:54 | 99,676,406 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 926 |
py
|
import preprocess_clueweb as p
import single_model_handler as mh
import evaluator as e
import params
import sys
if __name__=="__main__":
preprocess = p.preprocess()
X,y,queries=preprocess.retrieve_data_from_file(params.data_set_file,params.normalized)
sys.stdout.flush()
number_of_queries = len(set(queries))
evaluator = e.eval()
evaluator.create_index_to_doc_name_dict()
evaluator.remove_score_file_from_last_run()
sys.stdout.flush()
train,validation = preprocess.create_test_train_split_cluweb(queries)
sys.stdout.flush()
X_i,y_i=preprocess.create_data_set(X[train], y[train], queries[train])
sys.stdout.flush()
C_array = [0.1,0.01,0.001]
single_model_handler = mh.single_model_handler(C_array)
single_model_handler.fit_model_on_train_set_and_choose_best_for_competition(X,y,X_i,y_i,validation,queries,evaluator,preprocess)
print("learning is finished")
|
[
"[email protected]"
] | |
fc39b2c4d90ba07db630ff735ea9f1d228fce7d5
|
5cea76d53779d466f19a5cf0b51e003586cc4a7b
|
/python开发技术详解/源文件/09/9.1.6/assert.py
|
edda342a1eec64a32b4fae7c6342fe51a3b140d9
|
[] |
no_license
|
evan886/python
|
40152fdb4885876189580141abe27a983d04e04d
|
d33e996e93275f6b347ecc2d30f8efe05accd10c
|
refs/heads/master
| 2021-06-28T12:35:10.793186 | 2021-05-26T14:33:40 | 2021-05-26T14:33:40 | 85,560,342 | 2 | 1 | null | 2017-10-11T05:31:06 | 2017-03-20T09:51:50 |
JavaScript
|
GB18030
|
Python
| false | false | 284 |
py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# assert checks a logical expression
t = ("hello",)
assert len(t) >= 1
#t = ("hello")
#assert len(t) == 1
# assert statement with a message
month = 13
assert 1 <= month <= 12, "month errors"
#assert month >= 1 and month <= 12, "month errors"
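A small follow-on example: the message becomes the AssertionError's text, which can be observed by catching it:

try:
    month = 13
    assert 1 <= month <= 12, "month errors"
except AssertionError as exc:
    print(exc)   # -> month errors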
|
[
"[email protected]"
] | |
9ab5da089f7593a59cf5768ebba2bee45058e0c4
|
9b57629a451471c0d38fbc1d29373938e4856ed5
|
/pyreto/renderer.py
|
5d2e46bc1dd781d7dcc392699a78034a020e2287
|
[
"Apache-2.0"
] |
permissive
|
rwl/pylon
|
0b9d635f51be6fdf20dbf77b736e3b2f87e76a69
|
916514255db1ae1661406f0283df756baf960d14
|
refs/heads/master
| 2021-01-01T18:34:08.453649 | 2015-07-07T17:38:09 | 2015-07-07T17:38:09 | 107,383 | 15 | 12 | null | 2015-07-07T17:38:09 | 2009-01-14T16:18:21 |
Python
|
UTF-8
|
Python
| false | false | 7,339 |
py
|
#------------------------------------------------------------------------------
# Copyright (C) 2007-2010 Richard Lincoln
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
""" Defines a renderer that is executed as a concurrent thread and displays
aspects of the environment.
"""
#------------------------------------------------------------------------------
# Imports:
#------------------------------------------------------------------------------
import time
import threading
import matplotlib
matplotlib.use('WXAgg')
import numpy
import pylab
from pybrain.rl.environments.renderer import Renderer
#------------------------------------------------------------------------------
# "ExperimentRenderer" class:
#------------------------------------------------------------------------------
class ExperimentRenderer(Renderer):
""" Defines a renderer that displays aspects of a market experiment.
"""
# def __init__(self):
# """ Constructs a new ExperimentRenderer.
# """
# super(ExperimentRenderer, self).__init__()
#--------------------------------------------------------------------------
# "Renderer" interface:
#--------------------------------------------------------------------------
def updateData(self, data):
""" Updates the data used by the renderer.
"""
# pylab.ion()
fig = pylab.figure(1)
n_agent = len(data)
idx = 1
for i, adata in enumerate(data):
saxis = fig.add_subplot(3, n_agent, i + 1)
saxis.plot(adata[0])
idx += 1
aaxis = fig.add_subplot(3, n_agent, i + 1 + n_agent)
aaxis.plot(adata[1])
idx += 1
raxis = fig.add_subplot(3, n_agent, i + 1 + (n_agent * 2))
raxis.plot(adata[2])
idx += 1
pylab.show()
# self._render()
def start(self):
""" Wrapper for Thread.start().
"""
self.draw_plot()
super(ExperimentRenderer, self).start()
def _render(self):
""" Calls the render methods.
"""
# self.reward_line.set_ydata(self.reward_data)
def stop(self):
""" Stops the current rendering thread.
"""
pass
#--------------------------------------------------------------------------
# "ExperimentRenderer" interface:
#--------------------------------------------------------------------------
def draw_plot(self):
""" Initialises plots of the environment.
"""
pylab.ion()
fig = pylab.figure(1)
reward_axis = fig.add_subplot(1, 1, 1)
reward_lines = reward_axis.plot([0.0, 1.0], [0.0, 1.0], "mx-")
# self.reward_line = reward_lines[0]
pylab.draw()
#------------------------------------------------------------------------------
# "ParticipantRenderer" class:
#------------------------------------------------------------------------------
class ParticipantRenderer(Renderer):
""" Defines a renderer that displays aspects of a market participant's
environment.
"""
def __init__(self, outdim, indim, intermax=1000):
""" Initialises a new ParticipantRenderer instance.
"""
super(ParticipantRenderer, self).__init__()
# self.dataLock = threading.Lock()
self.stopRequest = False
self.updates = 0
self.state_data = numpy.zeros((outdim, intermax), float)
self.action_data = numpy.zeros((indim, intermax), float)
self.reward_data = numpy.zeros((1, intermax), float)
self.state_lines = []
self.action_lines = []
self.reward_line = None
#--------------------------------------------------------------------------
# "Renderer" interface:
#--------------------------------------------------------------------------
def updateData(self, state_data, action_data, reward_data):
""" Updates the data used by the renderer.
"""
# self.dataLock.acquire()
self.state_data[:, self.updates] = state_data
self.action_data[:, self.updates] = action_data
self.reward_data[0, self.updates] = reward_data
self.updates += 1
self._render()
# self.dataLock.release()
def start(self):
""" Wrapper for Thread.start().
"""
self.draw_plot()
super(ParticipantRenderer, self).start()
# def stop(self):
# """ Stops the current thread.
# """
# pass
# self.dataLock.acquire()
# self.stopRequest = True
# self.dataLock.release()
#--------------------------------------------------------------------------
# "ParticipantRenderer" interface:
#--------------------------------------------------------------------------
def draw_plot(self):
""" Initialises plots of the environment.
"""
pylab.ion()
fig = pylab.figure(1)
# State plot.
# state_axis = fig.add_subplot(3, 1, 1) # numrows, numcols, fignum
# state_axis.title = 'State'
# state_axis.xlabel = 'Time (hours)'
# state_axis.grid = True
# for i in range(self.state_data.shape[0]):
# lines = state_axis.plot(self.state_data[i, 0], "g+-")
# self.state_lines.append(lines[0])
# Action plot.
# action_axis = fig.add_subplot(3, 1, 2)
# action_axis.title = 'Action'
# action_axis.xlabel = 'Time (hours)'
# action_axis.ylabel = 'Price ($/MWh)'
# action_axis.grid = True
# for i in range(self.action_data.shape[0]):
# lines = action_axis.plot(self.action_data[i, 0], "ro-")
# self.action_lines.append(lines[0])
# Reward plot.
reward_axis = fig.add_subplot(3, 1, 3)
# reward_axis.title = 'Reward'
# reward_axis.xlabel = 'Time (hours)'
# reward_axis.ylabel = 'Earnings ($)'
# reward_axis.grid(True)
reward_lines = reward_axis.plot(self.reward_data[0, 0], [0], "mx-")
self.reward_line = reward_lines[0]
pylab.draw()
def _render(self):
""" Calls the render methods.
"""
# while not self.stopRequest:
# self.dataLock.acquire()
# for i, line in enumerate(self.state_lines):
# ydata = self.state_data[i, :]
# line.set_ydata(ydata)
#
# for j, line in enumerate(self.action_lines):
# ydata = self.action_data[j, :]
# line.set_ydata(ydata)
self.reward_line.set_ydata(self.reward_data)
# self.dataLock.release()
# time.sleep(0.05)
# self.stopRequest = False
# EOF -------------------------------------------------------------------------
|
[
"[email protected]"
] | |
7bfc33fa5570e49a7df08895a50f1226f8bcf524
|
e9be8f5c0b4468d29c798a67a56aa15be504a723
|
/home/migrations/0003_customtext_name.py
|
a65a9fbb96ed9638e8ef5a752e341131475ddd4f
|
[] |
no_license
|
crowdbotics-apps/test-23226
|
a73aa026e079de4c3419d85d214dbce66b9137fc
|
c2abc5faac4eba99f2c60a3c2fd2b382700266f3
|
refs/heads/master
| 2023-01-21T00:51:18.149767 | 2020-12-07T02:29:48 | 2020-12-07T02:29:48 | 319,181,784 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 391 |
py
|
# Generated by Django 2.2.17 on 2020-12-07 02:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("home", "0002_load_initial_data"),
]
operations = [
migrations.AddField(
model_name="customtext",
name="name",
field=models.TextField(blank=True, null=True),
),
]
|
[
"[email protected]"
] | |
04341bccd4121da621985612485cabb3d56736b4
|
4235c0baec2f75a9ab74ca2b88f9d05559db7969
|
/posts/migrations/0010_auto_20190422_1123.py
|
98861078292488d5c2a5d8ba7eb6a539183ee499
|
[] |
no_license
|
yooseungju/Fake-Instagram
|
364fc31337449274e0373cce86ebf171cb7c3271
|
d30767aa4f0705150d4fb430def92ae514565969
|
refs/heads/master
| 2020-05-07T21:44:51.621461 | 2019-04-22T08:36:59 | 2019-04-22T08:36:59 | 180,916,794 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 687 |
py
|
# Generated by Django 2.1.8 on 2019-04-22 11:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('posts', '0009_auto_20190416_1413'),
]
operations = [
migrations.CreateModel(
name='Hashtag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField(blank=True, unique=True)),
],
),
migrations.AddField(
model_name='post',
name='hashtags',
field=models.ManyToManyField(to='posts.Hashtag'),
),
]
|
[
"[email protected]"
] | |
165df46c501326550a09e354ab5dd3e63c1af15e
|
f10d45aecbfccb3f469ab0c4ae55fc0f256c9004
|
/Functions/compile.py
|
666455168649a3d7f56313f12ddcc095a7f07e45
|
[] |
no_license
|
Do-code-ing/Python_Built-ins
|
c34c1cea19a2cef80ab3a16d050e8825af0feb59
|
03b2f277acde4fce00bb521e3a0b8c0469b39879
|
refs/heads/master
| 2023-07-29T15:30:00.693005 | 2021-09-04T18:48:18 | 2021-09-04T18:48:18 | 354,467,767 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,649 |
py
|
# compile(source, filename, mode, flags=0, dont_inherit=False, optimize=-1)
# Compiles source into a code object or an AST object.
# Calling eval or exec on raw strings re-parses them every time; compile lets you
# pay that parsing cost once and reuse the compiled object.
# source : must be a string, a bytes object, or an AST object.
# filename : the name of the file the code was read from;
#            if the code is a plain string, pass "<string>".
# mode : the kind of code being compiled --
#        for example, 'exec' if source is a sequence of statements,
#        'eval' if it is a single expression,
#        'single' if it is a single interactive statement.
#        In 'single' mode, an expression that evaluates to anything other than None is printed.
# flags, dont_inherit : control how 'future statements' affect the compilation of source.
#        default = 0
# optimize : specifies the compiler's optimization level.
#        default = -1
# For more detail, see the links below.
# https://docs.python.org/ko/3/library/functions.html#compile
# https://www.programiz.com/python-programming/methods/built-in/compile
import ast
statement_a = "int_a + 3"
statement_b = "result = int_a + 3"
int_a = 10
statement_c = open(".\\study_builtins\\compile_doc.py")
filename_a = "<string>"
# compile with default options
print(compile(statement_a, filename_a, "eval"))
# <code object <module> at 0x00000261660065B0, file "<string>", line 1>
print(compile(statement_b, "statement_b", "single"))
# <code object <module> at 0x00000261660065B0, file "statement_b", line 1>
print(compile(statement_c.read(), "formula_doc", "exec"))
# <code object <module> at 0x00000261660065B0, file "formula_doc", line 1>
# compile with compiler flags, future features, and an optimize level
print(compile(statement_a, filename_a, "exec", ast.PyCF_ALLOW_TOP_LEVEL_AWAIT, 0, 2))
# <code object <module> at 0x00000261660065B0, file "<string>", line 1>
# Note
# When compiling a string containing multi-line code in 'single' or 'eval' mode,
# the input must be terminated by at least one newline character.
# This makes it easy for the code module to detect whether a statement is incomplete or complete.
# Warning
# Due to stack depth limitations in Python's AST compiler,
# compiling a sufficiently large and complex string to an AST object can crash the Python interpreter.
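# A small usage sketch (my own addition, not part of the original notes): exec and eval
# accept compiled code objects directly, which avoids re-parsing the source on each call.
code_eval = compile("int_a + 3\n", "<string>", "eval")   # single expression -> 'eval'
print(eval(code_eval))  # 13, since int_a == 10 above
code_exec = compile("result = int_a + 3\n", "<string>", "exec")  # statement -> 'exec'
exec(code_exec)
print(result)  # 13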
|
[
"[email protected]"
] | |
db6fbc8601791d7f4fbbfa3929297ba4ad395c39
|
a2b20597759990445081057d35d113434cfcf970
|
/stubs/typeshed/typeshed/stdlib/asyncio/subprocess.pyi
|
b8877b3605274c51907e8c6165c7f60aaeee03d0
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
facebook/pyre-check
|
34059599c02b65605c574f13555229f3b931fd4e
|
fe8ccedc572cc1faa1fd01e9138f65e982875002
|
refs/heads/main
| 2023-09-03T19:10:11.587028 | 2023-09-02T07:40:35 | 2023-09-02T07:40:35 | 110,274,488 | 6,703 | 575 |
MIT
| 2023-09-13T17:02:32 | 2017-11-10T17:31:36 |
OCaml
|
UTF-8
|
Python
| false | false | 9,322 |
pyi
|
import subprocess
import sys
from _typeshed import StrOrBytesPath
from asyncio import events, protocols, streams, transports
from collections.abc import Callable, Collection
from typing import IO, Any
from typing_extensions import Literal, TypeAlias
__all__ = ("create_subprocess_exec", "create_subprocess_shell")
if sys.version_info >= (3, 8):
_ExecArg: TypeAlias = StrOrBytesPath
else:
_ExecArg: TypeAlias = str | bytes
PIPE: int
STDOUT: int
DEVNULL: int
class SubprocessStreamProtocol(streams.FlowControlMixin, protocols.SubprocessProtocol):
stdin: streams.StreamWriter | None
stdout: streams.StreamReader | None
stderr: streams.StreamReader | None
def __init__(self, limit: int, loop: events.AbstractEventLoop) -> None: ...
def pipe_data_received(self, fd: int, data: bytes | str) -> None: ...
class Process:
stdin: streams.StreamWriter | None
stdout: streams.StreamReader | None
stderr: streams.StreamReader | None
pid: int
def __init__(
self, transport: transports.BaseTransport, protocol: protocols.BaseProtocol, loop: events.AbstractEventLoop
) -> None: ...
@property
def returncode(self) -> int | None: ...
async def wait(self) -> int: ...
def send_signal(self, signal: int) -> None: ...
def terminate(self) -> None: ...
def kill(self) -> None: ...
async def communicate(self, input: bytes | bytearray | memoryview | None = None) -> tuple[bytes, bytes]: ...
if sys.version_info >= (3, 11):
async def create_subprocess_shell(
cmd: str | bytes,
stdin: int | IO[Any] | None = None,
stdout: int | IO[Any] | None = None,
stderr: int | IO[Any] | None = None,
limit: int = 65536,
*,
# These parameters are forced to these values by BaseEventLoop.subprocess_shell
universal_newlines: Literal[False] = False,
shell: Literal[True] = True,
bufsize: Literal[0] = 0,
encoding: None = None,
errors: None = None,
text: Literal[False, None] = None,
# These parameters are taken by subprocess.Popen, which this ultimately delegates to
executable: StrOrBytesPath | None = None,
preexec_fn: Callable[[], Any] | None = None,
close_fds: bool = True,
cwd: StrOrBytesPath | None = None,
env: subprocess._ENV | None = None,
startupinfo: Any | None = None,
creationflags: int = 0,
restore_signals: bool = True,
start_new_session: bool = False,
pass_fds: Collection[int] = ...,
group: None | str | int = None,
extra_groups: None | Collection[str | int] = None,
user: None | str | int = None,
umask: int = -1,
process_group: int | None = None,
pipesize: int = -1,
) -> Process: ...
async def create_subprocess_exec(
program: _ExecArg,
*args: _ExecArg,
stdin: int | IO[Any] | None = None,
stdout: int | IO[Any] | None = None,
stderr: int | IO[Any] | None = None,
limit: int = 65536,
# These parameters are forced to these values by BaseEventLoop.subprocess_shell
universal_newlines: Literal[False] = False,
shell: Literal[True] = True,
bufsize: Literal[0] = 0,
encoding: None = None,
errors: None = None,
# These parameters are taken by subprocess.Popen, which this ultimately delegates to
text: bool | None = None,
executable: StrOrBytesPath | None = None,
preexec_fn: Callable[[], Any] | None = None,
close_fds: bool = True,
cwd: StrOrBytesPath | None = None,
env: subprocess._ENV | None = None,
startupinfo: Any | None = None,
creationflags: int = 0,
restore_signals: bool = True,
start_new_session: bool = False,
pass_fds: Collection[int] = ...,
group: None | str | int = None,
extra_groups: None | Collection[str | int] = None,
user: None | str | int = None,
umask: int = -1,
process_group: int | None = None,
pipesize: int = -1,
) -> Process: ...
elif sys.version_info >= (3, 10):
async def create_subprocess_shell(
cmd: str | bytes,
stdin: int | IO[Any] | None = None,
stdout: int | IO[Any] | None = None,
stderr: int | IO[Any] | None = None,
limit: int = 65536,
*,
# These parameters are forced to these values by BaseEventLoop.subprocess_shell
universal_newlines: Literal[False] = False,
shell: Literal[True] = True,
bufsize: Literal[0] = 0,
encoding: None = None,
errors: None = None,
text: Literal[False, None] = None,
# These parameters are taken by subprocess.Popen, which this ultimately delegates to
executable: StrOrBytesPath | None = None,
preexec_fn: Callable[[], Any] | None = None,
close_fds: bool = True,
cwd: StrOrBytesPath | None = None,
env: subprocess._ENV | None = None,
startupinfo: Any | None = None,
creationflags: int = 0,
restore_signals: bool = True,
start_new_session: bool = False,
pass_fds: Collection[int] = ...,
group: None | str | int = None,
extra_groups: None | Collection[str | int] = None,
user: None | str | int = None,
umask: int = -1,
pipesize: int = -1,
) -> Process: ...
async def create_subprocess_exec(
program: _ExecArg,
*args: _ExecArg,
stdin: int | IO[Any] | None = None,
stdout: int | IO[Any] | None = None,
stderr: int | IO[Any] | None = None,
limit: int = 65536,
# These parameters are forced to these values by BaseEventLoop.subprocess_shell
universal_newlines: Literal[False] = False,
shell: Literal[True] = True,
bufsize: Literal[0] = 0,
encoding: None = None,
errors: None = None,
# These parameters are taken by subprocess.Popen, which this ultimately delegates to
text: bool | None = None,
executable: StrOrBytesPath | None = None,
preexec_fn: Callable[[], Any] | None = None,
close_fds: bool = True,
cwd: StrOrBytesPath | None = None,
env: subprocess._ENV | None = None,
startupinfo: Any | None = None,
creationflags: int = 0,
restore_signals: bool = True,
start_new_session: bool = False,
pass_fds: Collection[int] = ...,
group: None | str | int = None,
extra_groups: None | Collection[str | int] = None,
user: None | str | int = None,
umask: int = -1,
pipesize: int = -1,
) -> Process: ...
else: # >= 3.9
async def create_subprocess_shell(
cmd: str | bytes,
stdin: int | IO[Any] | None = None,
stdout: int | IO[Any] | None = None,
stderr: int | IO[Any] | None = None,
loop: events.AbstractEventLoop | None = None,
limit: int = 65536,
*,
# These parameters are forced to these values by BaseEventLoop.subprocess_shell
universal_newlines: Literal[False] = False,
shell: Literal[True] = True,
bufsize: Literal[0] = 0,
encoding: None = None,
errors: None = None,
text: Literal[False, None] = None,
# These parameters are taken by subprocess.Popen, which this ultimately delegates to
executable: StrOrBytesPath | None = None,
preexec_fn: Callable[[], Any] | None = None,
close_fds: bool = True,
cwd: StrOrBytesPath | None = None,
env: subprocess._ENV | None = None,
startupinfo: Any | None = None,
creationflags: int = 0,
restore_signals: bool = True,
start_new_session: bool = False,
pass_fds: Collection[int] = ...,
group: None | str | int = None,
extra_groups: None | Collection[str | int] = None,
user: None | str | int = None,
umask: int = -1,
) -> Process: ...
async def create_subprocess_exec(
program: _ExecArg,
*args: _ExecArg,
stdin: int | IO[Any] | None = None,
stdout: int | IO[Any] | None = None,
stderr: int | IO[Any] | None = None,
loop: events.AbstractEventLoop | None = None,
limit: int = 65536,
# These parameters are forced to these values by BaseEventLoop.subprocess_shell
universal_newlines: Literal[False] = False,
shell: Literal[True] = True,
bufsize: Literal[0] = 0,
encoding: None = None,
errors: None = None,
# These parameters are taken by subprocess.Popen, which this ultimately delegates to
text: bool | None = None,
executable: StrOrBytesPath | None = None,
preexec_fn: Callable[[], Any] | None = None,
close_fds: bool = True,
cwd: StrOrBytesPath | None = None,
env: subprocess._ENV | None = None,
startupinfo: Any | None = None,
creationflags: int = 0,
restore_signals: bool = True,
start_new_session: bool = False,
pass_fds: Collection[int] = ...,
group: None | str | int = None,
extra_groups: None | Collection[str | int] = None,
user: None | str | int = None,
umask: int = -1,
) -> Process: ...
|
[
"[email protected]"
] | |
253b7329e6fc95b64e65cbc96b5cd33556a88bc3
|
9c315e3762961668a1fe58ad811ae87c5fbf7539
|
/apertium-tools/getBookNames.py
|
f309abe2db7601f668b301f996622c39c83b04b0
|
[] |
no_license
|
frankier/apertium
|
f2b893115c413203b1194e5c0d4feb0adf2b1b3e
|
d3f5515bf2455f3046314a62ea564457bcf504b8
|
refs/heads/gnulib
| 2021-01-20T21:00:53.139135 | 2016-05-27T17:30:01 | 2016-05-27T17:30:01 | 59,847,975 | 0 | 1 | null | 2016-07-07T12:39:01 | 2016-05-27T16:21:14 |
HTML
|
UTF-8
|
Python
| false | false | 1,248 |
py
|
#!/usr/bin/env python3
import os
import sys
import pprint
import argparse
#langs = ["xal", "chv", "tat", "kaz", "kaz2", "alt", "bua", "kir", "tgk", "tyv", "kaa", "gag", "kum", "aze", "kjh"] #POSSIBLE languages, kaz2 is a second kaz translation of the Bible
def todict(langs):
langData = {} #this is a dictionary
for lang in langs:
langData[lang] = {}
with open("%s.dat" % lang) as databaseFile:
for line in databaseFile :
if line.strip():
(english, target) = line.split(',')
langData[lang][english] = target.strip()
return langData
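# Illustrative .dat line format expected by todict() (hypothetical example -- the real
# files live next to this script as <lang>.dat, one "english,target" pair per line):
#   Genesis,<book name in the target language>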
def main():
    parser = argparse.ArgumentParser(description='This script generates a dictionary from a .dat file in trunk/apertium-tools')
    parser.add_argument('datfile', metavar='i', help='Languages (3 letter iso code) separated by a comma; make sure the corresponding .dat files exist')
    args = vars(parser.parse_args())
    if "," in args['datfile']:
        langs = args['datfile'].split(",")
    else:
        langs = [args['datfile']]
    langDict = todict(langs)
    pprint.pprint(langDict)

if __name__ == '__main__':
    main()
|
[
"unhammer@72bbbca6-d526-0410-a7d9-f06f51895060"
] |
unhammer@72bbbca6-d526-0410-a7d9-f06f51895060
|
0137356885a920c1fd4914505128d497798afb4f
|
ac0894b411507bfd027696b6bf11b5e384ed68fc
|
/need-to-do/python3------download-problem--of--leetcode/796.rotate-string.py
|
72980badbb5ee3be57bade30782a99641d9abe38
|
[] |
no_license
|
mkzpd/leetcode-solution
|
1d19554628c34c74012fa52582c225e6dccb345c
|
60c9b218683bcdee86477a910c58ec702185c726
|
refs/heads/master
| 2020-05-31T05:56:48.985529 | 2019-09-20T09:10:49 | 2019-09-20T09:10:49 | 190,128,627 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 833 |
py
|
#
# @lc app=leetcode id=796 lang=python3
#
# [796] Rotate String
#
# https://leetcode.com/problems/rotate-string/description/
#
# algorithms
# Easy (49.30%)
# Total Accepted: 47.2K
# Total Submissions: 95.8K
# Testcase Example: '"abcde"\n"cdeab"'
#
# We are given two strings, A and B.
#
# A shift on A consists of taking string A and moving the leftmost character to
# the rightmost position. For example, if A = 'abcde', then it will be 'bcdea'
# after one shift on A. Return True if and only if A can become B after some
# number of shifts on A.
#
#
# Example 1:
# Input: A = 'abcde', B = 'cdeab'
# Output: true
#
# Example 2:
# Input: A = 'abcde', B = 'abced'
# Output: false
#
#
# Note:
#
#
# A and B will have length at most 100.
#
#
#
class Solution:
    def rotateString(self, A: str, B: str) -> bool:
        # A rotation of A is always a contiguous substring of A + A, so B is a
        # rotation of A iff the lengths match and B occurs in A + A.
        return len(A) == len(B) and B in A + A
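# Quick sanity check (mirrors the examples in the problem statement above):
#   Solution().rotateString('abcde', 'cdeab')  -> True
#   Solution().rotateString('abcde', 'abced')  -> False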
|
[
"[email protected]"
] | |
eb32725b060733641d92539f1dad81793d9e1b55
|
e7bba3dd662bf2778c36a406f72ee93b2ea05e11
|
/CardinalityEstimationTestbed/Synthetic/deepdb/deepdb_job_ranges/ensemble_compilation/physical_db.py
|
49aaa09f09166393579b33846891d7c049f6f700
|
[
"MIT"
] |
permissive
|
TsinghuaDatabaseGroup/AI4DBCode
|
37e45b176bc94e77fe250ea45f0ad7b9054c7f11
|
a8989bfadcf551ee1dee2aec57ef6b2709c9f85d
|
refs/heads/master
| 2023-07-07T05:42:15.590000 | 2023-07-04T01:04:15 | 2023-07-04T01:04:15 | 217,175,047 | 53 | 35 | null | 2023-06-20T13:00:17 | 2019-10-24T00:03:14 |
Scala
|
UTF-8
|
Python
| false | false | 4,011 |
py
|
import pandas as pd
import psycopg2
from ensemble_compilation.utils import gen_full_join_query, print_conditions
class DBConnection:
def __init__(self, db='postgres', db_user='postgres', db_host="/var/run/postgresql", db_password="jintao",
db_port="5432"):
self.db_user = db_user
self.db_password = db_password
self.db_host = db_host
self.db_port = db_port
self.db = db
def vacuum(self):
connection = psycopg2.connect(user=self.db_user,
password=self.db_password,
host=self.db_host,
port=self.db_port,
database=self.db)
old_isolation_level = connection.isolation_level
connection.set_isolation_level(0)
query = "VACUUM"
cursor = connection.cursor()
cursor.execute(query)
connection.commit()
connection.set_isolation_level(old_isolation_level)
def get_dataframe(self, sql):
connection = psycopg2.connect(user=self.db_user,
password=self.db_password,
host=self.db_host,
port=self.db_port,
database=self.db)
return pd.read_sql(sql, connection)
def submit_query(self, sql):
"""Submits query and ignores result."""
connection = psycopg2.connect(user=self.db_user,
password=self.db_password,
host=self.db_host,
port=self.db_port,
database=self.db)
cursor = connection.cursor()
cursor.execute(sql)
connection.commit()
def get_result(self, sql):
"""Fetches exactly one row of result set."""
connection = psycopg2.connect(user=self.db_user,
password=self.db_password,
host=self.db_host,
port=self.db_port,
database=self.db)
cursor = connection.cursor()
cursor.execute(sql)
record = cursor.fetchone()
result = record[0]
if connection:
cursor.close()
connection.close()
return result
def get_result_set(self, sql, return_columns=False):
"""Fetches all rows of result set."""
connection = psycopg2.connect(user=self.db_user,
password=self.db_password,
host=self.db_host,
port=self.db_port,
database=self.db)
cursor = connection.cursor()
cursor.execute(sql)
rows = cursor.fetchall()
columns = [desc[0] for desc in cursor.description]
if connection:
cursor.close()
connection.close()
if return_columns:
return rows, columns
return rows
class TrueCardinalityEstimator:
"""Queries the database to return true cardinalities."""
def __init__(self, schema_graph, db_connection):
self.schema_graph = schema_graph
self.db_connection = db_connection
def true_cardinality(self, query):
full_join_query = gen_full_join_query(self.schema_graph, query.relationship_set, query.table_set, "JOIN")
where_cond = print_conditions(query.conditions, seperator='AND')
if where_cond != "":
where_cond = "WHERE " + where_cond
sql_query = full_join_query.format("COUNT(*)", where_cond)
cardinality = self.db_connection.get_result(sql_query)
return sql_query, cardinality
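# A minimal usage sketch (illustrative only; the database name, credentials and SQL
# below are assumptions, not part of this module):
#
#   db = DBConnection(db='imdb', db_password='secret')
#   row_count = db.get_result("SELECT COUNT(*) FROM title")
#   rows, columns = db.get_result_set("SELECT id, kind FROM kind_type", return_columns=True)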
|
[
"[email protected]"
] | |
9561f4f411321b341924c9a0d254730272840d1c
|
ce6cb09c21470d1981f1b459293d353407c8392e
|
/tests/unit/modules/test_settings.py
|
5aef7f7f0ae31d7af5d8157132b9b09f13efe129
|
[
"Apache-2.0"
] |
permissive
|
minefuto/healthbot-py-client
|
c4be4c9c3153ef64b37e5344bf84154e93e7b521
|
bb81452c974456af44299aebf32a73abeda8a943
|
refs/heads/master
| 2022-12-04T07:47:04.722993 | 2020-05-13T14:04:07 | 2020-05-13T14:04:07 | 290,145,286 | 0 | 0 |
Apache-2.0
| 2020-08-25T07:27:54 | 2020-08-25T07:27:53 | null |
UTF-8
|
Python
| false | false | 19,263 |
py
|
import unittest
from nose.plugins.attrib import attr
from mock import patch
from jnpr.healthbot import HealthBotClient
from jnpr.healthbot import NotificationSchema
from jnpr.healthbot import NotificationSchemaSlack
from jnpr.healthbot import SchedulerSchema
from jnpr.healthbot import DestinationSchema
from jnpr.healthbot import ReportSchema
from jnpr.healthbot import RetentionPolicySchema
from requests.models import Response
from . import _mock_user_login
@attr('unit')
class TestSettings(unittest.TestCase):
@patch('jnpr.healthbot.healthbot.requests.Session')
@patch('jnpr.healthbot.swagger.api.authentication_api.AuthenticationApi.user_login')
def setUp(self, mock_user_login, mock_request):
self.mock_user_login = _mock_user_login
self.mock_request = mock_request
self.mock_request().get.side_effect = self._mock_manager
self.conn = HealthBotClient(
server='1.1.1.1',
user='test',
password='password123').open()
self.conn.api_client.call_api = self._mock_manager
def tearDown(self) -> None:
self.conn.close()
def test_add_notification(self):
ns = NotificationSchema(notification_name='HbEZ-notification')
ns.description = "example of adding notification via API"
nss = NotificationSchemaSlack(channel="HbEZ", url='http://testing')
ns.slack = nss
self.assertTrue(self.conn.settings.notification.add(ns))
self.assertEqual(self.mock_request().mock_calls[2][0], 'post')
self.assertEqual(
self.mock_request().mock_calls[2][1][0],
'https://1.1.1.1:8080/api/v1/notification/HbEZ-notification')
# add without schema
self.assertTrue(
self.conn.settings.notification.add(
notification_name='HbEZ-notification',
description="example of adding notification via API"))
self.assertEqual(self.mock_request().mock_calls[2][0], 'post')
self.assertEqual(
self.mock_request().mock_calls[2][1][0],
'https://1.1.1.1:8080/api/v1/notification/HbEZ-notification')
def test_add_scheduler(self):
sc = SchedulerSchema(
name='HbEZ-schedule',
repeat={
'every': 'week'},
start_time="2019-07-22T05:32:23Z")
self.assertTrue(self.conn.settings.scheduler.add(sc))
self.assertEqual(self.mock_request().mock_calls[2][0], 'post')
self.assertEqual(
self.mock_request().mock_calls[2][1][0],
'https://1.1.1.1:8080/api/v1/system-settings/scheduler/HbEZ-schedule')
# add without schema
self.assertTrue(
self.conn.settings.scheduler.add(
name='HbEZ-schedule',
repeat={
'every': 'week'},
start_time="2019-07-22T05:32:23Z"))
self.assertEqual(self.mock_request().mock_calls[2][0], 'post')
self.assertEqual(
self.mock_request().mock_calls[2][1][0],
'https://1.1.1.1:8080/api/v1/system-settings/scheduler/HbEZ-schedule')
def test_add_destinaton(self):
ds = DestinationSchema(
name='HbEZ-destination',
email={
'id': '[email protected]'})
self.assertTrue(self.conn.settings.destination.add(ds))
self.assertEqual(self.mock_request().mock_calls[2][0], 'post')
self.assertEqual(
self.mock_request().mock_calls[2][1][0],
'https://1.1.1.1:8080/api/v1/system-settings/report-generation/destination/HbEZ-destination')
# add without schema
self.assertTrue(self.conn.settings.destination.add(
name='HbEZ-destination', email={'id': '[email protected]'}))
self.assertEqual(self.mock_request().mock_calls[2][0], 'post')
self.assertEqual(
self.mock_request().mock_calls[2][1][0],
'https://1.1.1.1:8080/api/v1/system-settings/report-generation/destination/HbEZ-destination')
def test_add_report(self):
rs = ReportSchema(
name="HbEZ-report",
destination=['HbEZ-destination'],
format="html",
schedule=["HbEZ-schedule"])
self.assertTrue(self.conn.settings.report.add(rs))
self.assertEqual(self.mock_request().mock_calls[2][0], 'post')
self.assertEqual(
self.mock_request().mock_calls[2][1][0],
'https://1.1.1.1:8080/api/v1/system-settings/report-generation/report/HbEZ-report')
# add without schema
self.assertTrue(
self.conn.settings.report.add(
name="HbEZ-report",
destination=['HbEZ-destination'],
format="html",
schedule=["HbEZ-schedule"]))
self.assertEqual(self.mock_request().mock_calls[2][0], 'post')
self.assertEqual(
self.mock_request().mock_calls[2][1][0],
'https://1.1.1.1:8080/api/v1/system-settings/report-generation/report/HbEZ-report')
def test_add_retention_policy(self):
rps = RetentionPolicySchema(retention_policy_name='HbEZ-testing')
self.assertTrue(
self.conn.settings.retention_policy.add(rps))
self.assertEqual(self.mock_request().mock_calls[2][0], 'post')
self.assertEqual(
self.mock_request().mock_calls[2][1][0],
'https://1.1.1.1:8080/api/v1/retention-policy/HbEZ-testing')
# without creating schema
self.assertTrue(
self.conn.settings.retention_policy.add(
retention_policy_name='HbEZ-testing'))
self.assertEqual(self.mock_request().mock_calls[2][0], 'post')
self.assertEqual(
self.mock_request().mock_calls[2][1][0],
'https://1.1.1.1:8080/api/v1/retention-policy/HbEZ-testing')
def test_get_notification(self):
ns = self.conn.settings.notification.get(
notification_name='HbEZ-notification')
self.assertEqual(
ns.description,
"example of adding notification via API")
self.assertEqual(ns.notification_name, "HbEZ-notification")
def test_get_notification_error(self):
self.assertRaises(
ValueError,
self.conn.settings.notification.get,
notification_name='error')
def test_get_scheduler(self):
sc = self.conn.settings.scheduler.get(name='HbEZ-schedule')
self.assertEqual(sc.repeat, {
"every": "week"
})
def test_get_destination(self):
ds = self.conn.settings.destination.get(
name='HbEZ-destination')
self.assertEqual(ds.email, {'id': '[email protected]'})
def test_get_report(self):
rs = self.conn.settings.report.get(name="HbEZ-report")
self.assertEqual(rs.format, 'html')
def test_get_retention_policy(self):
rp = self.conn.settings.retention_policy.get(
'HbEZ-testing')
self.assertEqual(rp.retention_policy_name, "HbEZ-testing")
def test_get_reports(self):
rs = self.conn.settings.report.get()
self.assertGreater(len(rs), 0)
def test_get_notifications(self):
ns = self.conn.settings.notification.get()
self.assertGreater(len(ns), 0)
def test_get_schedulers(self):
sc = self.conn.settings.scheduler.get()
self.assertGreater(len(sc), 0)
def test_get_destinations(self):
des = self.conn.settings.destination.get()
self.assertGreater(len(des), 0)
def test_get_reports(self):
rep = self.conn.settings.report.get()
self.assertGreater(len(rep), 0)
def test_get_retention_policies(self):
rp = self.conn.settings.retention_policy.get()
self.assertGreater(len(rp), 0)
def test_delete_notification(self):
ret = self.conn.settings.notification.delete(
notification_name='HbEZ-notification')
self.assertTrue(ret)
self.assertEqual(self.mock_request().mock_calls[2][0],
'delete')
def test_delete_scheduler(self):
ret = self.conn.settings.scheduler.delete(
name='HbEZ-schedule')
self.assertTrue(ret)
self.assertEqual(self.mock_request().mock_calls[2][0],
'delete')
def test_delete_destinaton(self):
ret = self.conn.settings.destination.delete(
name='HbEZ-destination')
self.assertTrue(ret)
self.assertEqual(self.mock_request().mock_calls[2][0],
'delete')
def test_delete_report(self):
ret = self.conn.settings.report.delete(name="HbEZ-report")
self.assertTrue(ret)
self.assertEqual(self.mock_request().mock_calls[2][0],
'delete')
def test_delete_retention_policy(self):
ret = self.conn.settings.retention_policy.delete(
"HbEZ-testing")
self.assertTrue(ret)
self.assertEqual(self.mock_request().mock_calls[2][0],
'delete')
def test_update_notification(self):
ns = self.conn.settings.notification.get(
notification_name='HbEZ-notification')
from jnpr.healthbot import NotificationSchemaHttppost
ns.http_post = NotificationSchemaHttppost(url='https://juniper.net')
self.conn.settings.notification.update(ns)
self.assertEqual(
self.mock_request().mock_calls[3][2]['json']['http-post']['url'],
'https://juniper.net')
def test_update_scheduler(self):
sc = self.conn.settings.scheduler.get(name='HbEZ-schedule')
sc.repeat = {'every': 'daily'}
self.conn.settings.scheduler.update(sc)
self.assertEqual(
self.mock_request().mock_calls[3][2]['json']['repeat']['every'],
'daily')
def test_update_destination(self):
ds = self.conn.settings.destination.get(
name='HbEZ-destination')
ds.email = {'id': '[email protected]'}
self.conn.settings.destination.update(ds)
self.assertEqual(
self.mock_request().mock_calls[3][2]['json']['email']['id'],
'[email protected]')
def test_update_report(self):
rs = self.conn.settings.report.get(name="HbEZ-report")
rs.format = 'json'
self.conn.settings.report.update(rs)
self.assertEqual(
self.mock_request().mock_calls[3][2]['json']['format'],
'json')
def test_update_retention_policy(self):
rp = self.conn.settings.retention_policy.get(
'HbEZ-testing')
rp.duration = '10h'
self.conn.settings.retention_policy.update(rp)
self.assertEqual(
self.mock_request().mock_calls[3][2]['json']['duration'],
'10h')
def test_license_get(self):
ret = self.conn.settings.license.get(
'xxx-xx-xxx')
self.assertEqual(ret["customer-id"], "xxxx")
def test_license_get_ids(self):
ret = self.conn.settings.license.get_ids()
self.assertEqual(ret, ['xxx-xx-xxx', 'yyy-yy-yyy'])
def test_license_get_features(self):
ret = self.conn.settings.license.get_features()
self.assertEqual(ret.feature_description, "Max G8")
def test_license_get_all(self):
ret = self.conn.settings.license.get()
self.assertEqual(ret[0].validity_type, "countdown")
def test_license_delete(self):
self.assertTrue(self.conn.settings.license.delete("xxx-xx-xxx"))
def _mock_manager(self, *args, **kwargs):
class MockResponse(Response):
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
@property
def text(self):
return 'did you just hit an error'
def json(self):
return self.json_data
def to_dict(self):
return self.json_data
def raise_for_status(self):
return None
if args[0] == 'https://1.1.1.1:8080/api/v1/notification/HbEZ-notification/?working=true':
return MockResponse({
"description": "example of adding notification via API",
"notification-name": "HbEZ-notification",
"slack": {
"channel": "HbEZ",
"url": "http://testing"
}
}, 200)
elif args[0] == 'https://1.1.1.1:8080/api/v1/system-settings/scheduler/HbEZ-schedule/?working=true':
return MockResponse({
"name": "HbEZ-schedule",
"repeat": {
"every": "week"
},
"start-time": "2019-07-22T05:32:23Z"
}, 200)
elif args[0] == 'https://1.1.1.1:8080/api/v1/system-settings/report-generation/destination/HbEZ-destination/?working=true':
return MockResponse({
"email": {
"id": "[email protected]"
},
"name": "HbEZ-destination"
}, 200)
elif args[0] == 'https://1.1.1.1:8080/api/v1/system-settings/report-generation/report/HbEZ-report/?working=true':
return MockResponse({
"destination": [
"HbEZ-destination"
],
"format": "html",
"name": "HbEZ-report",
"schedule": [
"HbEZ-schedule"
]
}, 200)
elif args[0] == 'https://1.1.1.1:8080/api/v1/system-settings/report-generation/reports/?working=true':
return MockResponse({
"report": [
{
"destination": [
"HbEZ-destination"
],
"format": "html",
"name": "HbEZ-report",
"schedule": [
"HbEZ-schedule"
]
}
]
}, 200)
elif args[0] == 'https://1.1.1.1:8080/api/v1/notifications/?working=true':
return MockResponse({
"notification": [
{
"description": "example of adding notification via API",
"notification-name": "HbEZ-notification",
"slack": {
"channel": "HbEZ",
"url": "http://testing"
}
}
]
}, 200)
elif args[0] == 'https://1.1.1.1:8080/api/v1/system-settings/schedulers/?working=true':
return MockResponse({
"scheduler": [
{
"name": "HbEZ-schedule",
"repeat": {
"every": "week"
},
"start-time": "2019-07-22T05:32:23Z"
}
]
}, 200)
elif args[0] == 'https://1.1.1.1:8080/api/v1/system-settings/report-generation/destinations/?working=true':
return MockResponse({
"destination": [
{
"email": {
"id": "[email protected]"
},
"name": "HbEZ-destination"
}
]
}, 200)
elif args[0] == 'https://1.1.1.1:8080/api/v1/retention-policy/HbEZ-testing/?working=true':
return MockResponse({
"retention-policy-name": "HbEZ-testing"
}, 200)
elif args[0] == 'https://1.1.1.1:8080/api/v1/retention-policies/?working=true':
return MockResponse({
"retention-policy": [
{
"retention-policy-name": "HbEZ-testing"
}
]
}, 200)
elif args[0] == 'https://1.1.1.1:8080/api/v1/notification/error/?working=true':
obj = MockResponse(None, 404)
def fn():
raise ValueError
obj.raise_for_status = fn
return obj
elif args[0] == '/license/key/{license_id}/contents/' and \
args[2] == {'license_id': 'xxx-xx-xxx'}:
return {"customer-id": "xxxx",
"end-date": "2106-02-0xxx",
"features": [{"capacity-flag": False,
"capacity-value": 1,
"feature-description": "Allow a..",
"feature-id": 10001,
"feature-name": "xxxx"},
],
"license-id": "xxx-xx-xxx",
"mode": "standalone",
"order-type": "commercial",
"sku-name": "HBxxx",
"start-date": "20xxx",
"sw-serial-id": "07xxx",
"validity-type": "xxx",
"version": 1}
elif args[0] == '/license/keys/contents/':
from jnpr.healthbot import LicenseKeySchema
from jnpr.healthbot import LicenseKeysSchema
return LicenseKeysSchema([LicenseKeySchema(**{"customer_id": "xxxx",
"end_date": "2106-02-0xxx",
"features": [{"capacity_flag": False,
"capacity_value": 1,
"feature_description": "Allow a..",
"feature_id": 10001,
"feature_name": "xxxx"},
],
"license_id": "xxx-xx-xxx",
"mode": "standalone",
"order_type": "commercial",
"sku_name": "HBxxx",
"start_date": "20xxx",
"sw_serial_id": "07xxx",
"validity_type": "countdown",
"version": 1})])
elif args[0] == '/license/keys/':
return ["xxx-xx-xxx", "yyy-yy-yyy"]
elif args[0] == '/license/status/':
from jnpr.healthbot import LicenseFeaturesSchema
from jnpr.healthbot import LicenseFeatureSchema
return LicenseFeaturesSchema(license_feature= LicenseFeatureSchema(**{'compliance': True,
'end_date': 111,
'feature_description': 'Max G8',
'feature_id': 111,
'feature_name': 'xxxx',
'license_remaining': 1,
'license_requested': 1,
'license_total': 1,
'license_usage': 1,
'max_remaining_days': 1,
'mode': 'standalone',
'valid_until': 'xxxx',
'validity_type': 'countdown'}))
elif args[0] == '/license/key/{license_id}/' and args[1] == 'DELETE' and\
args[2] == {'license_id': 'xxx-xx-xxx'}:
return None
return MockResponse(None, 404)
|
[
"[email protected]"
] | |
9edf384e79dea79baa8bb61cf6401ef072f974a0
|
a03b30ee77b49e19a72b647e984b98f878c2847a
|
/Anaconda-files/Programs_13c.py
|
86e9478978c292b6db68c6833bab67a5bbdf74f5
|
[
"BSD-2-Clause"
] |
permissive
|
SSalaPla/dynamical-systems-with-applications-using-python
|
d47f46dfbe7195d2446cdee7f874cc3e4a5ab90a
|
c80582ae3559230d12e2aee15f94c465e367fdda
|
refs/heads/master
| 2021-05-03T16:00:31.561907 | 2018-02-05T15:16:13 | 2018-02-05T15:16:13 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 435 |
py
|
# Programs_13c: The Leslie matrix. See Example 4.
# Compute the population distribution after 50 years.
# Determine the eigenvalues and eigenvectors of a Leslie matrix.
import numpy as np
import numpy.linalg as LA
L=np.array([[0,3,1],[0.3,0,0],[0,0.5,0]])
X0=np.array([[1000],[2000],[3000]])
X_50=np.dot(LA.matrix_power(L,50),X0)
X_50=X_50.round()
print('X(50)=',X_50)
dL,VL=LA.eig(L)
print('Eigenvalues=',dL)
print('Eigenvectors=',VL)
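# Illustrative extension (my own addition, standard Leslie-matrix theory): the dominant
# eigenvalue gives the asymptotic growth rate per time step, and its eigenvector,
# normalized to sum to 1, gives the stable age distribution.
dominant = np.argmax(np.abs(dL))
print('Asymptotic growth rate =', dL[dominant].real)
print('Stable age distribution =', (VL[:, dominant] / VL[:, dominant].sum()).real)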
|
[
"[email protected]"
] | |
82958dfb2f0172c53857321c5004392e3ea3c047
|
c5d68f58c9523257a8b41954553f5cff2cd5f487
|
/Secao_06_Lista_Ex_62e/ex_56.py
|
bd2bccb3ca2646da55f1e255bafaab409a0bf57a
|
[] |
no_license
|
SouzaCadu/guppe
|
04bfcde82d4404eb9ec795006c6931ba07dc72b6
|
1f8a672230c5c27712f522e1e34516591c012453
|
refs/heads/master
| 2023-03-13T01:32:51.019871 | 2021-02-25T17:02:59 | 2021-02-25T17:02:59 | 320,908,119 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 868 |
py
|
"""
faça um programa que some todos os números primos abaixo de 2MM
Observações
1) Existe um teorema na matemática que diz que se um número não possui divisores até sua raiz quadrada então ele é primo, por isso o num**0.5
2) Este código supõe que o número n inserido será maior que 0, por isso a soma já começa = 2, uma vez que 2 é primo. E só passa a executar a verificação se n>1, caso contrário é impresso apenas 2.
"""
contador = 1
num = 3
soma = 2
referencia = 2000000
while num < referencia:
primo = True
verificador = 3
while verificador <= num ** 0.5 and primo:
if num % verificador == 0:
primo = False
verificador += 2
if primo:
contador += 1
soma = soma + num
num += 2
# print(f"{num}", end=" ")
print('\n')
print(f"The sum of the {contador} primes found below {referencia} is {soma}.")
|
[
"[email protected]"
] | |
016beda8449388bcc4a78f821ef89a6b1d737a78
|
aee00a21081bb0d6a2ed96218d650663294dd0dc
|
/pyLibrary/queries/es_query_aggop.py
|
186257e87b901ca55ba06d4d1801ea4b85e926b0
|
[] |
no_license
|
klahnakoski/Datazilla2ElasticSearch
|
8c386b0ed3f52412981cae852e7ecb66f17b43cb
|
9675cbdb5fb0428a247f38e7088a8f42f19f3e20
|
refs/heads/master
| 2021-01-22T09:33:40.958865 | 2014-10-20T20:19:35 | 2014-10-20T20:19:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,106 |
py
|
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
from __future__ import unicode_literals
from __future__ import division
from ..collections.matrix import Matrix
from ..collections import AND
from ..structs.wraps import listwrap
from ..struct import unwrap
from ..queries import es_query_util
from ..queries.es_query_util import aggregates, fix_es_stats, buildESQuery
from ..queries.filters import simplify
from ..queries import MVEL
from ..queries.cube import Cube
def is_aggop(query):
if not query.edges:
return True
return False
def es_aggop(es, mvel, query):
select = listwrap(query.select)
esQuery = buildESQuery(query)
isSimple = AND(aggregates[s.aggregate] == "count" for s in select)
if isSimple:
return es_countop(es, query) # SIMPLE, USE TERMS FACET INSTEAD
value2facet = dict() # ONLY ONE FACET NEEDED PER
name2facet = dict() # MAP name TO FACET WITH STATS
for s in select:
if s.value not in value2facet:
if MVEL.isKeyword(s.value):
unwrap(esQuery.facets)[s.name] = {
"statistical": {
"field": s.value
},
"facet_filter": simplify(query.where)
}
else:
unwrap(esQuery.facets)[s.name] = {
"statistical": {
"script": mvel.compile_expression(s.value, query)
},
"facet_filter": simplify(query.where)
}
value2facet[s.value] = s.name
name2facet[s.name] = value2facet[s.value]
data = es_query_util.post(es, esQuery, query.limit)
matricies = {s.name: Matrix(value=fix_es_stats(unwrap(data.facets)[s.name])[aggregates[s.aggregate]]) for s in select}
cube = Cube(query.select, [], matricies)
cube.frum = query
return cube
def es_countop(es, mvel, query):
"""
RETURN SINGLE COUNT
"""
select = listwrap(query.select)
esQuery = buildESQuery(query)
for s in select:
if MVEL.isKeyword(s.value):
esQuery.facets[s.name] = {
"terms": {
"field": s.value,
"size": query.limit,
},
"facet_filter":{"exists":{"field":s.value}}
}
else:
# COMPLICATED value IS PROBABLY A SCRIPT, USE IT
esQuery.facets[s.name] = {
"terms": {
"script_field": mvel.compile_expression(s.value, query),
"size": 200000
}
}
data = es_query_util.post(es, esQuery, query.limit)
matricies = {}
for s in select:
matricies[s.name] = Matrix(value=data.hits.facets[s.name].total)
cube = Cube(query.select, query.edges, matricies)
cube.frum = query
return cube
|
[
"[email protected]"
] | |
a047aee51e0337ee44f1cded42f2c410b3866aad
|
929fba6e9f74cc109d98efdc0f32fa4cadbd4def
|
/Mathematics/1161.py
|
ceab72e57e789f3cd59716a281a2cb5cfb4428e1
|
[
"MIT"
] |
permissive
|
LorranSutter/URI-Online-Judge
|
2be9d95a27e52fad6bb1ae189d9bb39c72a43228
|
01822b6124a535aeecbdbdad616b61f2d55dd8d4
|
refs/heads/master
| 2023-01-28T00:15:34.569999 | 2023-01-24T03:08:38 | 2023-01-24T03:08:38 | 141,393,679 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 424 |
py
|
fats = {
'0':1,
'1':1,
'2':2,
'3':6,
'4':24,
'5':120,
'6':720,
'7':5040,
'8':40320,
'9':362880,
'10':3628800,
'11':39916800,
'12':479001600,
'13':6227020800,
'14':87178291200,
'15':1307674368000,
'16':20922789888000,
'17':355687428096000,
'18':6402373705728000,
'19':121645100408832000,
'20':2432902008176640000
}
while True:
try:
M, N = input().split()
print(fats[M] + fats[N])
except:
break
|
[
"[email protected]"
] | |
4b874967bf34bdc5d672c7193fca61f4f6696d35
|
854394f4148e7bee8cd3c6d2a01e97ffbf772103
|
/0x02-python-import_modules/2-args.py
|
a3ea363e7986241d9426c33948fa9622f6fa3db5
|
[] |
no_license
|
garethbrickman/holbertonschool-higher_level_programming
|
cb3ccb864102d62af72b5e86d53638bd899bfabb
|
05d65c6c89008cb70cbc1ada5bb9c8ed7a2733e9
|
refs/heads/master
| 2021-07-10T08:32:23.397388 | 2020-10-15T18:40:55 | 2020-10-15T18:40:55 | 207,379,469 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 403 |
py
|
#!/usr/bin/python3
if __name__ == '__main__':
from sys import argv
argc = len(argv)
if argc < 2:
print("0 arguments.")
for i in range(1, argc):
if argc < 3:
print("1 argument:")
print("1: {}".format(argv[1]))
else:
if i == 1:
print("{} arguments:".format(argc-1))
print("{}: {}".format(i, argv[i]))
|
[
"[email protected]"
] | |
7065e46a10f60850b0d2b2f007bfe93dcacb0bec
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_206/1318.py
|
e3cf5015dc7050102c3190364333952ee4a6a424
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 538 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def main(inp):
D, N = inp.split()
D, N = int(D), int(N)
horses = []
for i in range(N):
K, S = input().split()
K, S = int(K), int(S)
horses.append((K, S))
slowest_time = 0
for horse in horses:
time = (D - horse[0]) / horse[1]
if time > slowest_time:
slowest_time = time
return "{0:.6f}".format(D / slowest_time)
if __name__ == '__main__':
testcases = int(input())
for case in range(testcases):
inp = input()
print("Case #{}: {}".format(case+1, main(inp)))
|
[
"[email protected]"
] | |
e7ef237a242c1df8dd3125dd680dfc3a251e39e4
|
3fda3ff2e9334433554b6cf923506f428d9e9366
|
/hipeac/migrations/0018_auto_20190131_1245.py
|
05e389d20c7a24fa3c385a730a8607cce3faedd6
|
[
"MIT"
] |
permissive
|
CreativeOthman/hipeac
|
12adb61099886a6719dfccfa5ce26fdec8951bf9
|
2ce98da17cac2c6a87ec88df1b7676db4c200607
|
refs/heads/master
| 2022-07-20T10:06:58.771811 | 2020-05-07T11:39:13 | 2020-05-07T11:44:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 464 |
py
|
# Generated by Django 2.1.5 on 2019-01-31 11:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("hipeac", "0017_membershiprequest"),
]
operations = [
migrations.RemoveField(model_name="membershiprequest", name="status",),
migrations.AddField(
model_name="membershiprequest", name="accepted", field=models.BooleanField(default=None, null=True),
),
]
|
[
"[email protected]"
] | |
c9eaba44939c86fbebd36c85ef891559522ecb86
|
94e00065cd54c06faf36d1ed78a067d69e94696d
|
/PPA2/npmrds_data_conflation.py
|
160324032b54bd582a94d9a4877558e07d5096b4
|
[
"MIT"
] |
permissive
|
djconly85/PPA2_0_code
|
98887eebf02738a07358b1e57ab71bf717ffc86e
|
1d67934eb3bc3c7df16cdd5867639b92c9ea45eb
|
refs/heads/master
| 2021-07-01T04:42:58.770965 | 2021-01-08T22:40:06 | 2021-01-08T22:40:06 | 207,445,494 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,417 |
py
|
# Esri start of added imports
import sys, os, arcpy
# Esri end of added imports
# Esri start of added variables
g_ESRI_variable_1 = 'fl_splitprojlines'
g_ESRI_variable_2 = 'fl_splitproj_w_tmcdata'
g_ESRI_variable_3 = "{} = '{}'"
g_ESRI_variable_4 = '{} IS NOT NULL'
g_ESRI_variable_5 = os.path.join(arcpy.env.packageWorkspace,'index')
g_ESRI_variable_6 = 'fl_project'
g_ESRI_variable_7 = 'fl_speed_data'
g_ESRI_variable_8 = '{} IN {}'
g_ESRI_variable_9 = 'fl_tmc_buff'
# Esri end of added variables
'''
#--------------------------------
# Name: PPA_getNPMRDSdata.py
# Purpose: Get distance-weighted average speed from NPMRDS data for PPA project,
#
#
# Author: Darren Conly
# Last Updated: <date>
# Updated by: <name>
# Copyright: (c) SACOG
# Python Version: <version>
#--------------------------------
Sample projects used: CAL20466, SAC25062
'''
import os
import re
import datetime as dt
import time
import arcpy
#from arcgis.features import SpatialDataFrame
import pandas as pd
import ppa_input_params as params
import ppa_utils as utils
arcpy.env.overwriteOutput = True
dateSuffix = str(dt.date.today().strftime('%m%d%Y'))
# ====================FUNCTIONS==========================================
def get_wtd_speed(in_df, in_field, direction, fld_pc_len_ft):
fielddir = "{}{}".format(direction, in_field)
fld_invspd = "spdinv_hpm"
fld_pc_tt = "projpc_tt"
fld_len_mi = "pc_len_mi"
in_df[fld_invspd] = 1/in_df[in_field] # calculate each piece's "hours per mile", or inverted speed, as 1/speed
# get each piece's travel time, in hours as inverted speed (hrs per mi) * piece distance (mi)
in_df[fld_len_mi] = in_df[fld_pc_len_ft]/params.ft2mile
in_df[fld_pc_tt] = in_df[fld_invspd] * in_df[fld_len_mi]
# get total travel time, in hours, for all pieces, then divide total distance, in miles, for all pieces by the total tt
# to get average MPH for the project
proj_mph = in_df[fld_len_mi].sum() / in_df[fld_pc_tt].sum()
return {fielddir: proj_mph}
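# Worked example (illustrative): two pieces of 1 mile each, at 30 mph and 60 mph, give a
# total travel time of 1/30 h + 1/60 h = 0.05 h, so the distance-weighted speed is
# 2 mi / 0.05 h = 40 mph (a harmonic mean, not the 45 mph arithmetic mean).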
def conflate_tmc2projline(fl_proj, dirxn_list, tmc_dir_field,
fl_tmcs_buffd, fields_calc_dict):
speed_data_fields = [k for k, v in fields_calc_dict.items()]
out_row_dict = {}
# get length of project
fld_shp_len = "SHAPE@LENGTH"
fld_totprojlen = "proj_length_ft"
with arcpy.da.SearchCursor(fl_proj, fld_shp_len) as cur:
for row in cur:
out_row_dict[fld_totprojlen] = row[0]
for direcn in dirxn_list:
# https://support.esri.com/en/technical-article/000012699
# temporary files
scratch_gdb = arcpy.env.scratchGDB
temp_intersctpts = os.path.join(scratch_gdb, "temp_intersectpoints") # r"{}\temp_intersectpoints".format(scratch_gdb)
temp_intrsctpt_singlpt = os.path.join(scratch_gdb, "temp_intrsctpt_singlpt") # converted from multipoint to single point (1 pt per feature)
temp_splitprojlines = os.path.join(scratch_gdb, "temp_splitprojlines") # fc of project line split up to match TMC buffer extents
temp_splitproj_w_tmcdata = os.path.join(scratch_gdb, "temp_splitproj_w_tmcdata") # fc of split project lines with TMC data on them
fl_splitprojlines = g_ESRI_variable_1
fl_splitproj_w_tmcdata = g_ESRI_variable_2
# get TMCs whose buffers intersect the project line
arcpy.SelectLayerByLocation_management(fl_tmcs_buffd, "INTERSECT", fl_proj)
# select TMCs that intersect the project and are in indicated direction
sql_sel_tmcxdir = g_ESRI_variable_3.format(tmc_dir_field, direcn)
arcpy.SelectLayerByAttribute_management(fl_tmcs_buffd, "SUBSET_SELECTION", sql_sel_tmcxdir)
# split the project line at the boundaries of the TMC buffer, creating points where project line intersects TMC buffer boundaries
arcpy.Intersect_analysis([fl_proj, fl_tmcs_buffd],temp_intersctpts,"","","POINT")
arcpy.MultipartToSinglepart_management (temp_intersctpts, temp_intrsctpt_singlpt)
# split project line into pieces at points where it intersects buffer, with 10ft tolerance
# (not sure why 10ft tolerance needed but it is, zero tolerance results in some not splitting)
arcpy.SplitLineAtPoint_management(fl_proj, temp_intrsctpt_singlpt,
temp_splitprojlines, "10 Feet")
arcpy.MakeFeatureLayer_management(temp_splitprojlines, fl_splitprojlines)
# get TMC speeds onto each piece of the split project line via spatial join
arcpy.SpatialJoin_analysis(temp_splitprojlines, fl_tmcs_buffd, temp_splitproj_w_tmcdata,
"JOIN_ONE_TO_ONE", "KEEP_ALL", "#", "HAVE_THEIR_CENTER_IN", "30 Feet")
# convert to fl and select records where "check field" col val is not none
arcpy.MakeFeatureLayer_management(temp_splitproj_w_tmcdata, fl_splitproj_w_tmcdata)
check_field = speed_data_fields[0] # choose first speed value field for checking--if it's null, then don't include those rows in aggregation
sql_notnull = g_ESRI_variable_4.format(check_field)
arcpy.SelectLayerByAttribute_management(fl_splitproj_w_tmcdata, "NEW_SELECTION", sql_notnull)
# convert the selected records into a numpy array then a pandas dataframe
flds_df = [fld_shp_len] + speed_data_fields
df_spddata = utils.esri_object_to_df(fl_splitproj_w_tmcdata, flds_df)
# remove project pieces with no speed data so their distance isn't included in weighting
df_spddata = df_spddata.loc[pd.notnull(df_spddata[speed_data_fields[0]])].astype(float)
# remove rows where there wasn't enough NPMRDS data to get a valid speed or reliability reading
df_spddata = df_spddata.loc[df_spddata[flds_df].min(axis=1) > 0]
dir_len = df_spddata[fld_shp_len].sum() #sum of lengths of project segments that intersect TMCs in the specified direction
out_row_dict["{}_calc_len".format(direcn)] = dir_len #"calc" length because it may not be same as project length
# go through and do conflation calculation for each TMC-based data field based on correct method of aggregation
for field, calcmthd in fields_calc_dict.items():
if calcmthd == params.calc_inv_avg: # See PPA documentation on how to calculated "inverted speed average" method
sd_dict = get_wtd_speed(df_spddata, field, direcn, fld_shp_len)
out_row_dict.update(sd_dict)
elif calcmthd == params.calc_distwt_avg:
fielddir = "{}{}".format(direcn, field) # add direction tag to field names
# if there's speed data, get weighted average value.
linklen_w_speed_data = df_spddata[fld_shp_len].sum()
if linklen_w_speed_data > 0: #wgtd avg = sum(piece's data * piece's len)/(sum of all piece lengths)
avg_data_val = (df_spddata[field]*df_spddata[fld_shp_len]).sum() \
/ df_spddata[fld_shp_len].sum()
out_row_dict[fielddir] = avg_data_val
else:
                    out_row_dict[fielddir] = df_spddata[field].mean() # if no length, just return mean speed? Maybe instead return 'no data available'? Or -1 to keep as int?
continue
else:
continue
#cleanup
fcs_to_delete = [temp_intersctpts, temp_intrsctpt_singlpt, temp_splitprojlines, temp_splitproj_w_tmcdata]
for fc in fcs_to_delete:
arcpy.Delete_management(fc)
return pd.DataFrame([out_row_dict])
def simplify_outputs(in_df, proj_len_col):
dirlen_suffix = '_calc_len'
proj_len = in_df[proj_len_col][0]
re_lendir_col = '.*{}'.format(dirlen_suffix)
lendir_cols = [i for i in in_df.columns if re.search(re_lendir_col, i)]
df_lencols = in_df[lendir_cols]
max_dir_len = df_lencols.max(axis = 1)[0] # direction for which project has longest intersect with TMC. assumes just one record in the output
#if there's less than 10% overlap in the 'highest overlap' direction, then say that the project is not on any TMCs (and any TMC data is from cross streets or is insufficient to represent the segment)
if (max_dir_len / proj_len) < 0.1:
out_df = pd.DataFrame([-1], columns=['SegmentSpeedData'])
return out_df.to_dict('records')
else:
max_len_col = df_lencols.idxmax(axis = 1)[0] #return column name of direction with greatest overlap
df_lencols2 = df_lencols.drop(max_len_col, axis = 1)
secndmax_col = df_lencols2.idxmax(axis = 1)[0] #return col name of direction with second-most overlap (should be reverse of direction with most overlap)
maxdir = max_len_col[:max_len_col.find(dirlen_suffix)] #direction name without '_calc_len' suffix
secdir = secndmax_col[:secndmax_col.find(dirlen_suffix)]
outcols_max = [c for c in in_df.columns if re.match(maxdir, c)]
outcols_sec = [c for c in in_df.columns if re.match(secdir, c)]
outcols = outcols_max + outcols_sec
return in_df[outcols].to_dict('records')
def make_df(in_dict):
re_dirn = re.compile("(.*BOUND).*") # retrieve direction
re_metric = re.compile(".*BOUND(.*)") # retrieve name of metric
    df = pd.DataFrame.from_dict(in_dict, orient='index')  # orient must be the literal string 'index'; the auto-generated path variable was a packaging artifact
col_metric = 'metric'
col_direction = 'direction'
df[col_direction] = df.index.map(lambda x: re.match(re_dirn, x).group(1))
df[col_metric] = df.index.map(lambda x: re.match(re_metric, x).group(1))
df_out = df.pivot(index=col_metric, columns=col_direction, values=0 )
return df_out
def get_npmrds_data(fc_projline, str_project_type):
arcpy.AddMessage("Calculating congestion and reliability metrics...")
arcpy.OverwriteOutput = True
fl_projline = g_ESRI_variable_6
arcpy.MakeFeatureLayer_management(fc_projline, fl_projline)
# make feature layer from speed data feature class
fl_speed_data = g_ESRI_variable_7
arcpy.MakeFeatureLayer_management(params.fc_speed_data, fl_speed_data)
# make flat-ended buffers around TMCs that intersect project
arcpy.SelectLayerByLocation_management(fl_speed_data, "WITHIN_A_DISTANCE", fl_projline, params.tmc_select_srchdist, "NEW_SELECTION")
if str_project_type == 'Freeway':
sql = g_ESRI_variable_8.format(params.col_roadtype, params.roadtypes_fwy)
arcpy.SelectLayerByAttribute_management(fl_speed_data, "SUBSET_SELECTION", sql)
else:
sql = "{} NOT IN {}".format(params.col_roadtype, params.roadtypes_fwy)
arcpy.SelectLayerByAttribute_management(fl_speed_data, "SUBSET_SELECTION", sql)
    # create temporary buffer layer, flat-tipped, around TMCs; will be used to split project lines
temp_tmcbuff = os.path.join(arcpy.env.scratchGDB, "TEMP_linkbuff_4projsplit")
fl_tmc_buff = g_ESRI_variable_9
arcpy.Buffer_analysis(fl_speed_data, temp_tmcbuff, params.tmc_buff_dist_ft, "FULL", "FLAT")
arcpy.MakeFeatureLayer_management(temp_tmcbuff, fl_tmc_buff)
# get "full" table with data for all directions
projdata_df = conflate_tmc2projline(fl_projline, params.directions_tmc, params.col_tmcdir,
fl_tmc_buff, params.spd_data_calc_dict)
# trim down table to only include outputs for directions that are "on the segment",
# i.e., that have most overlap with segment
out_dict = simplify_outputs(projdata_df, 'proj_length_ft')[0]
#cleanup
arcpy.Delete_management(temp_tmcbuff)
return out_dict
# =====================RUN SCRIPT===========================
'''
if __name__ == '__main__':
start_time = time.time()
workspace = None
arcpy.env.workspace = workspace
project_line = "test_project_causeway_fwy" # arcpy.GetParameterAsText(0) #"NPMRDS_confl_testseg_seconn"
proj_type = params.ptype_fwy # arcpy.GetParameterAsText(2) #"Freeway"
test_dict = get_npmrds_data(project_line, proj_type)
print(test_dict)
elapsed_time = round((time.time() - start_time)/60, 1)
print("Success! Time elapsed: {} minutes".format(elapsed_time))
'''
|
[
"[email protected]"
] | |
8e356d3b216be573090d6c6b30dd50936d9c2ba0
|
a56252fda5c9e42eff04792c6e16e413ad51ba1a
|
/resources/usr/local/lib/python2.7/dist-packages/scipy/spatial/tests/test_kdtree.py
|
319d8fecf05b82f9c6f1be17313768eda9911f70
|
[
"Apache-2.0"
] |
permissive
|
edawson/parliament2
|
4231e692565dbecf99d09148e75c00750e6797c4
|
2632aa3484ef64c9539c4885026b705b737f6d1e
|
refs/heads/master
| 2021-06-21T23:13:29.482239 | 2020-12-07T21:10:08 | 2020-12-07T21:10:08 | 150,246,745 | 0 | 0 |
Apache-2.0
| 2019-09-11T03:22:55 | 2018-09-25T10:21:03 |
Python
|
UTF-8
|
Python
| false | false | 21,121 |
py
|
# Copyright Anne M. Archibald 2008
# Released under the scipy license
from __future__ import division, print_function, absolute_import
from numpy.testing import (assert_equal, assert_array_equal,
assert_almost_equal, assert_array_almost_equal, assert_, run_module_suite)
import numpy as np
from scipy.spatial import KDTree, Rectangle, distance_matrix, cKDTree
from scipy.spatial import minkowski_distance as distance
class ConsistencyTests:
def test_nearest(self):
x = self.x
d, i = self.kdtree.query(x, 1)
assert_almost_equal(d**2,np.sum((x-self.data[i])**2))
eps = 1e-8
assert_(np.all(np.sum((self.data-x[np.newaxis,:])**2,axis=1) > d**2-eps))
def test_m_nearest(self):
x = self.x
m = self.m
dd, ii = self.kdtree.query(x, m)
d = np.amax(dd)
i = ii[np.argmax(dd)]
assert_almost_equal(d**2,np.sum((x-self.data[i])**2))
eps = 1e-8
assert_equal(np.sum(np.sum((self.data-x[np.newaxis,:])**2,axis=1) < d**2+eps),m)
def test_points_near(self):
x = self.x
d = self.d
dd, ii = self.kdtree.query(x, k=self.kdtree.n, distance_upper_bound=d)
eps = 1e-8
hits = 0
for near_d, near_i in zip(dd,ii):
if near_d == np.inf:
continue
hits += 1
assert_almost_equal(near_d**2,np.sum((x-self.data[near_i])**2))
assert_(near_d < d+eps, "near_d=%g should be less than %g" % (near_d,d))
assert_equal(np.sum(np.sum((self.data-x[np.newaxis,:])**2,axis=1) < d**2+eps),hits)
def test_points_near_l1(self):
x = self.x
d = self.d
dd, ii = self.kdtree.query(x, k=self.kdtree.n, p=1, distance_upper_bound=d)
eps = 1e-8
hits = 0
for near_d, near_i in zip(dd,ii):
if near_d == np.inf:
continue
hits += 1
assert_almost_equal(near_d,distance(x,self.data[near_i],1))
assert_(near_d < d+eps, "near_d=%g should be less than %g" % (near_d,d))
assert_equal(np.sum(distance(self.data,x,1) < d+eps),hits)
def test_points_near_linf(self):
x = self.x
d = self.d
dd, ii = self.kdtree.query(x, k=self.kdtree.n, p=np.inf, distance_upper_bound=d)
eps = 1e-8
hits = 0
for near_d, near_i in zip(dd,ii):
if near_d == np.inf:
continue
hits += 1
assert_almost_equal(near_d,distance(x,self.data[near_i],np.inf))
assert_(near_d < d+eps, "near_d=%g should be less than %g" % (near_d,d))
assert_equal(np.sum(distance(self.data,x,np.inf) < d+eps),hits)
def test_approx(self):
x = self.x
k = self.k
eps = 0.1
d_real, i_real = self.kdtree.query(x, k)
d, i = self.kdtree.query(x, k, eps=eps)
assert_(np.all(d <= d_real*(1+eps)))
class test_random(ConsistencyTests):
def setUp(self):
self.n = 100
self.m = 4
np.random.seed(1234)
self.data = np.random.randn(self.n, self.m)
self.kdtree = KDTree(self.data,leafsize=2)
self.x = np.random.randn(self.m)
self.d = 0.2
self.k = 10
class test_random_far(test_random):
def setUp(self):
test_random.setUp(self)
self.x = np.random.randn(self.m)+10
class test_small(ConsistencyTests):
def setUp(self):
self.data = np.array([[0,0,0],
[0,0,1],
[0,1,0],
[0,1,1],
[1,0,0],
[1,0,1],
[1,1,0],
[1,1,1]])
self.kdtree = KDTree(self.data)
self.n = self.kdtree.n
self.m = self.kdtree.m
np.random.seed(1234)
self.x = np.random.randn(3)
self.d = 0.5
self.k = 4
def test_nearest(self):
assert_array_equal(
self.kdtree.query((0,0,0.1), 1),
(0.1,0))
def test_nearest_two(self):
assert_array_equal(
self.kdtree.query((0,0,0.1), 2),
([0.1,0.9],[0,1]))
class test_small_nonleaf(test_small):
def setUp(self):
test_small.setUp(self)
self.kdtree = KDTree(self.data,leafsize=1)
class test_small_compiled(test_small):
def setUp(self):
test_small.setUp(self)
self.kdtree = cKDTree(self.data)
class test_small_nonleaf_compiled(test_small):
def setUp(self):
test_small.setUp(self)
self.kdtree = cKDTree(self.data,leafsize=1)
class test_random_compiled(test_random):
def setUp(self):
test_random.setUp(self)
self.kdtree = cKDTree(self.data)
class test_random_far_compiled(test_random_far):
def setUp(self):
test_random_far.setUp(self)
self.kdtree = cKDTree(self.data)
class test_vectorization:
def setUp(self):
self.data = np.array([[0,0,0],
[0,0,1],
[0,1,0],
[0,1,1],
[1,0,0],
[1,0,1],
[1,1,0],
[1,1,1]])
self.kdtree = KDTree(self.data)
def test_single_query(self):
d, i = self.kdtree.query(np.array([0,0,0]))
assert_(isinstance(d,float))
assert_(np.issubdtype(i, int))
def test_vectorized_query(self):
d, i = self.kdtree.query(np.zeros((2,4,3)))
assert_equal(np.shape(d),(2,4))
assert_equal(np.shape(i),(2,4))
def test_single_query_multiple_neighbors(self):
s = 23
kk = self.kdtree.n+s
d, i = self.kdtree.query(np.array([0,0,0]),k=kk)
assert_equal(np.shape(d),(kk,))
assert_equal(np.shape(i),(kk,))
assert_(np.all(~np.isfinite(d[-s:])))
assert_(np.all(i[-s:] == self.kdtree.n))
def test_vectorized_query_multiple_neighbors(self):
s = 23
kk = self.kdtree.n+s
d, i = self.kdtree.query(np.zeros((2,4,3)),k=kk)
assert_equal(np.shape(d),(2,4,kk))
assert_equal(np.shape(i),(2,4,kk))
assert_(np.all(~np.isfinite(d[:,:,-s:])))
assert_(np.all(i[:,:,-s:] == self.kdtree.n))
def test_single_query_all_neighbors(self):
d, i = self.kdtree.query([0,0,0],k=None,distance_upper_bound=1.1)
assert_(isinstance(d,list))
assert_(isinstance(i,list))
def test_vectorized_query_all_neighbors(self):
d, i = self.kdtree.query(np.zeros((2,4,3)),k=None,distance_upper_bound=1.1)
assert_equal(np.shape(d),(2,4))
assert_equal(np.shape(i),(2,4))
assert_(isinstance(d[0,0],list))
assert_(isinstance(i[0,0],list))
class test_vectorization_compiled:
def setUp(self):
self.data = np.array([[0,0,0],
[0,0,1],
[0,1,0],
[0,1,1],
[1,0,0],
[1,0,1],
[1,1,0],
[1,1,1]])
self.kdtree = cKDTree(self.data)
def test_single_query(self):
d, i = self.kdtree.query([0,0,0])
assert_(isinstance(d,float))
assert_(isinstance(i,int))
def test_vectorized_query(self):
d, i = self.kdtree.query(np.zeros((2,4,3)))
assert_equal(np.shape(d),(2,4))
assert_equal(np.shape(i),(2,4))
def test_vectorized_query_noncontiguous_values(self):
np.random.seed(1234)
qs = np.random.randn(3,1000).T
ds, i_s = self.kdtree.query(qs)
for q, d, i in zip(qs,ds,i_s):
assert_equal(self.kdtree.query(q),(d,i))
def test_single_query_multiple_neighbors(self):
s = 23
kk = self.kdtree.n+s
d, i = self.kdtree.query([0,0,0],k=kk)
assert_equal(np.shape(d),(kk,))
assert_equal(np.shape(i),(kk,))
assert_(np.all(~np.isfinite(d[-s:])))
assert_(np.all(i[-s:] == self.kdtree.n))
def test_vectorized_query_multiple_neighbors(self):
s = 23
kk = self.kdtree.n+s
d, i = self.kdtree.query(np.zeros((2,4,3)),k=kk)
assert_equal(np.shape(d),(2,4,kk))
assert_equal(np.shape(i),(2,4,kk))
assert_(np.all(~np.isfinite(d[:,:,-s:])))
assert_(np.all(i[:,:,-s:] == self.kdtree.n))
class ball_consistency:
def test_in_ball(self):
l = self.T.query_ball_point(self.x, self.d, p=self.p, eps=self.eps)
for i in l:
assert_(distance(self.data[i],self.x,self.p) <= self.d*(1.+self.eps))
def test_found_all(self):
c = np.ones(self.T.n,dtype=np.bool)
l = self.T.query_ball_point(self.x, self.d, p=self.p, eps=self.eps)
c[l] = False
assert_(np.all(distance(self.data[c],self.x,self.p) >= self.d/(1.+self.eps)))
class test_random_ball(ball_consistency):
def setUp(self):
n = 100
m = 4
np.random.seed(1234)
self.data = np.random.randn(n,m)
self.T = KDTree(self.data,leafsize=2)
self.x = np.random.randn(m)
self.p = 2.
self.eps = 0
self.d = 0.2
class test_random_ball_compiled(ball_consistency):
def setUp(self):
n = 100
m = 4
np.random.seed(1234)
self.data = np.random.randn(n,m)
self.T = cKDTree(self.data,leafsize=2)
self.x = np.random.randn(m)
self.p = 2.
self.eps = 0
self.d = 0.2
class test_random_ball_approx(test_random_ball):
def setUp(self):
test_random_ball.setUp(self)
self.eps = 0.1
class test_random_ball_approx_compiled(test_random_ball_compiled):
def setUp(self):
test_random_ball_compiled.setUp(self)
self.eps = 0.1
class test_random_ball_far(test_random_ball):
def setUp(self):
test_random_ball.setUp(self)
self.d = 2.
class test_random_ball_far_compiled(test_random_ball_compiled):
def setUp(self):
test_random_ball_compiled.setUp(self)
self.d = 2.
class test_random_ball_l1(test_random_ball):
def setUp(self):
test_random_ball.setUp(self)
self.p = 1
class test_random_ball_l1_compiled(test_random_ball_compiled):
def setUp(self):
test_random_ball_compiled.setUp(self)
self.p = 1
class test_random_ball_linf(test_random_ball):
def setUp(self):
test_random_ball.setUp(self)
self.p = np.inf
class test_random_ball_linf_compiled(test_random_ball_compiled):
def setUp(self):
test_random_ball_compiled.setUp(self)
self.p = np.inf
def test_random_ball_vectorized():
n = 20
m = 5
T = KDTree(np.random.randn(n,m))
r = T.query_ball_point(np.random.randn(2,3,m),1)
assert_equal(r.shape,(2,3))
assert_(isinstance(r[0,0],list))
def test_random_ball_vectorized_compiled():
n = 20
m = 5
np.random.seed(1234)
T = cKDTree(np.random.randn(n,m))
r = T.query_ball_point(np.random.randn(2,3,m),1)
assert_equal(r.shape,(2,3))
assert_(isinstance(r[0,0],list))
class two_trees_consistency:
def test_all_in_ball(self):
r = self.T1.query_ball_tree(self.T2, self.d, p=self.p, eps=self.eps)
for i, l in enumerate(r):
for j in l:
assert_(distance(self.data1[i],self.data2[j],self.p) <= self.d*(1.+self.eps))
def test_found_all(self):
r = self.T1.query_ball_tree(self.T2, self.d, p=self.p, eps=self.eps)
for i, l in enumerate(r):
c = np.ones(self.T2.n,dtype=np.bool)
c[l] = False
assert_(np.all(distance(self.data2[c],self.data1[i],self.p) >= self.d/(1.+self.eps)))
class test_two_random_trees(two_trees_consistency):
def setUp(self):
n = 50
m = 4
np.random.seed(1234)
self.data1 = np.random.randn(n,m)
self.T1 = KDTree(self.data1,leafsize=2)
self.data2 = np.random.randn(n,m)
self.T2 = KDTree(self.data2,leafsize=2)
self.p = 2.
self.eps = 0
self.d = 0.2
class test_two_random_trees_compiled(two_trees_consistency):
def setUp(self):
n = 50
m = 4
np.random.seed(1234)
self.data1 = np.random.randn(n,m)
self.T1 = cKDTree(self.data1,leafsize=2)
self.data2 = np.random.randn(n,m)
self.T2 = cKDTree(self.data2,leafsize=2)
self.p = 2.
self.eps = 0
self.d = 0.2
class test_two_random_trees_far(test_two_random_trees):
def setUp(self):
test_two_random_trees.setUp(self)
self.d = 2
class test_two_random_trees_far_compiled(test_two_random_trees_compiled):
def setUp(self):
test_two_random_trees_compiled.setUp(self)
self.d = 2
class test_two_random_trees_linf(test_two_random_trees):
def setUp(self):
test_two_random_trees.setUp(self)
self.p = np.inf
class test_two_random_trees_linf_compiled(test_two_random_trees_compiled):
def setUp(self):
test_two_random_trees_compiled.setUp(self)
self.p = np.inf
class test_rectangle:
def setUp(self):
self.rect = Rectangle([0,0],[1,1])
def test_min_inside(self):
assert_almost_equal(self.rect.min_distance_point([0.5,0.5]),0)
def test_min_one_side(self):
assert_almost_equal(self.rect.min_distance_point([0.5,1.5]),0.5)
def test_min_two_sides(self):
assert_almost_equal(self.rect.min_distance_point([2,2]),np.sqrt(2))
def test_max_inside(self):
assert_almost_equal(self.rect.max_distance_point([0.5,0.5]),1/np.sqrt(2))
def test_max_one_side(self):
assert_almost_equal(self.rect.max_distance_point([0.5,1.5]),np.hypot(0.5,1.5))
def test_max_two_sides(self):
assert_almost_equal(self.rect.max_distance_point([2,2]),2*np.sqrt(2))
def test_split(self):
less, greater = self.rect.split(0,0.1)
assert_array_equal(less.maxes,[0.1,1])
assert_array_equal(less.mins,[0,0])
assert_array_equal(greater.maxes,[1,1])
assert_array_equal(greater.mins,[0.1,0])
def test_distance_l2():
assert_almost_equal(distance([0,0],[1,1],2),np.sqrt(2))
def test_distance_l1():
assert_almost_equal(distance([0,0],[1,1],1),2)
def test_distance_linf():
assert_almost_equal(distance([0,0],[1,1],np.inf),1)
def test_distance_vectorization():
np.random.seed(1234)
x = np.random.randn(10,1,3)
y = np.random.randn(1,7,3)
assert_equal(distance(x,y).shape,(10,7))
class test_count_neighbors:
def setUp(self):
n = 50
m = 2
np.random.seed(1234)
self.T1 = KDTree(np.random.randn(n,m),leafsize=2)
self.T2 = KDTree(np.random.randn(n,m),leafsize=2)
def test_one_radius(self):
r = 0.2
assert_equal(self.T1.count_neighbors(self.T2, r),
np.sum([len(l) for l in self.T1.query_ball_tree(self.T2,r)]))
def test_large_radius(self):
r = 1000
assert_equal(self.T1.count_neighbors(self.T2, r),
np.sum([len(l) for l in self.T1.query_ball_tree(self.T2,r)]))
def test_multiple_radius(self):
rs = np.exp(np.linspace(np.log(0.01),np.log(10),3))
results = self.T1.count_neighbors(self.T2, rs)
assert_(np.all(np.diff(results) >= 0))
for r,result in zip(rs, results):
assert_equal(self.T1.count_neighbors(self.T2, r), result)
class test_count_neighbors_compiled:
def setUp(self):
n = 50
m = 2
np.random.seed(1234)
self.T1 = cKDTree(np.random.randn(n,m),leafsize=2)
self.T2 = cKDTree(np.random.randn(n,m),leafsize=2)
def test_one_radius(self):
r = 0.2
assert_equal(self.T1.count_neighbors(self.T2, r),
np.sum([len(l) for l in self.T1.query_ball_tree(self.T2,r)]))
def test_large_radius(self):
r = 1000
assert_equal(self.T1.count_neighbors(self.T2, r),
np.sum([len(l) for l in self.T1.query_ball_tree(self.T2,r)]))
def test_multiple_radius(self):
rs = np.exp(np.linspace(np.log(0.01),np.log(10),3))
results = self.T1.count_neighbors(self.T2, rs)
assert_(np.all(np.diff(results) >= 0))
for r,result in zip(rs, results):
assert_equal(self.T1.count_neighbors(self.T2, r), result)
class test_sparse_distance_matrix:
def setUp(self):
n = 50
m = 4
np.random.seed(1234)
self.T1 = KDTree(np.random.randn(n,m),leafsize=2)
self.T2 = KDTree(np.random.randn(n,m),leafsize=2)
self.r = 0.5
def test_consistency_with_neighbors(self):
M = self.T1.sparse_distance_matrix(self.T2, self.r)
r = self.T1.query_ball_tree(self.T2, self.r)
for i,l in enumerate(r):
for j in l:
assert_almost_equal(M[i,j],
distance(self.T1.data[i], self.T2.data[j]),
decimal=14)
for ((i,j),d) in M.items():
assert_(j in r[i])
def test_zero_distance(self):
# raises an exception for bug 870
self.T1.sparse_distance_matrix(self.T1, self.r)
class test_sparse_distance_matrix_compiled:
def setUp(self):
n = 50
m = 4
np.random.seed(0)
data1 = np.random.randn(n,m)
data2 = np.random.randn(n,m)
self.T1 = cKDTree(data1,leafsize=2)
self.T2 = cKDTree(data2,leafsize=2)
self.ref_T1 = KDTree(data1, leafsize=2)
self.ref_T2 = KDTree(data2, leafsize=2)
self.r = 0.5
def test_consistency_with_neighbors(self):
M = self.T1.sparse_distance_matrix(self.T2, self.r)
r = self.T1.query_ball_tree(self.T2, self.r)
for i,l in enumerate(r):
for j in l:
assert_almost_equal(M[i,j],
distance(self.T1.data[i], self.T2.data[j]),
decimal=14)
for ((i,j),d) in M.items():
assert_(j in r[i])
def test_zero_distance(self):
# raises an exception for bug 870 (FIXME: Does it?)
self.T1.sparse_distance_matrix(self.T1, self.r)
def test_consistency_with_python(self):
M1 = self.T1.sparse_distance_matrix(self.T2, self.r)
M2 = self.ref_T1.sparse_distance_matrix(self.ref_T2, self.r)
assert_array_almost_equal(M1.todense(), M2.todense(), decimal=14)
def test_distance_matrix():
m = 10
n = 11
k = 4
np.random.seed(1234)
xs = np.random.randn(m,k)
ys = np.random.randn(n,k)
ds = distance_matrix(xs,ys)
assert_equal(ds.shape, (m,n))
for i in range(m):
for j in range(n):
assert_almost_equal(distance(xs[i],ys[j]),ds[i,j])
def test_distance_matrix_looping():
m = 10
n = 11
k = 4
np.random.seed(1234)
xs = np.random.randn(m,k)
ys = np.random.randn(n,k)
ds = distance_matrix(xs,ys)
dsl = distance_matrix(xs,ys,threshold=1)
assert_equal(ds,dsl)
def check_onetree_query(T,d):
r = T.query_ball_tree(T, d)
s = set()
for i, l in enumerate(r):
for j in l:
if i < j:
s.add((i,j))
assert_(s == T.query_pairs(d))
def test_onetree_query():
np.random.seed(0)
n = 50
k = 4
points = np.random.randn(n,k)
T = KDTree(points)
yield check_onetree_query, T, 0.1
points = np.random.randn(3*n,k)
points[:n] *= 0.001
points[n:2*n] += 2
T = KDTree(points)
yield check_onetree_query, T, 0.1
yield check_onetree_query, T, 0.001
yield check_onetree_query, T, 0.00001
yield check_onetree_query, T, 1e-6
def test_onetree_query_compiled():
np.random.seed(0)
n = 100
k = 4
points = np.random.randn(n,k)
T = cKDTree(points)
yield check_onetree_query, T, 0.1
points = np.random.randn(3*n,k)
points[:n] *= 0.001
points[n:2*n] += 2
T = cKDTree(points)
yield check_onetree_query, T, 0.1
yield check_onetree_query, T, 0.001
yield check_onetree_query, T, 0.00001
yield check_onetree_query, T, 1e-6
def test_query_pairs_single_node():
tree = KDTree([[0, 1]])
assert_equal(tree.query_pairs(0.5), set())
def test_query_pairs_single_node_compiled():
tree = cKDTree([[0, 1]])
assert_equal(tree.query_pairs(0.5), set())
def test_ball_point_ints():
"""Regression test for #1373."""
x, y = np.mgrid[0:4, 0:4]
points = list(zip(x.ravel(), y.ravel()))
tree = KDTree(points)
assert_equal(sorted([4, 8, 9, 12]),
sorted(tree.query_ball_point((2, 0), 1)))
points = np.asarray(points, dtype=np.float)
tree = KDTree(points)
assert_equal(sorted([4, 8, 9, 12]),
sorted(tree.query_ball_point((2, 0), 1)))
# cKDTree is specialized to type double points, so no need to make
# a unit test corresponding to test_ball_point_ints()
if __name__ == "__main__":
run_module_suite()
|
[
"[email protected]"
] | |
3d42f04e1dbdfd001aec0c19bf420821cdefd8be
|
d89eea893b1491b545075bc16eb63b9e99aabf45
|
/store/urls.py
|
6f8661c1157fed563f0d8f73dbae06037e48e4c3
|
[] |
no_license
|
kkthecompguy/allsafeshop
|
ed6d19555e3bfffe54812a399c62380a5189c229
|
836919d6652fccc72ad95c097f627b82d6d2504e
|
refs/heads/master
| 2023-02-06T06:34:16.504053 | 2021-01-02T16:45:11 | 2021-01-02T16:45:11 | 326,227,395 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 360 |
py
|
from django.urls import path
from .views import store, cart, checkout, add_to_cart, place_order
app_name = "store"
urlpatterns = [
path('', store, name='store'),
path('cart', cart, name='cart'),
path('checkout', checkout, name='checkout'),
path('add-to-cart', add_to_cart, name='add-to-cart'),
path('place-order', place_order, name='place-order'),
]
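# Usage note (assuming this module is included at the site root under the "store"
# namespace set by app_name above): reverse('store:cart') would resolve to '/cart'
# and reverse('store:add-to-cart') to '/add-to-cart'.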
|
[
"[email protected]"
] | |
3ae23f556592d59e06b9d9779437a55a17712b25
|
14f4d045750f7cf45252838d625b2a761d5dee38
|
/argo/test/test_io_k8s_api_storage_v1beta1_csi_node_list.py
|
d6234ac236a906689b94489a832c768f4bfb9f87
|
[] |
no_license
|
nfillot/argo_client
|
cf8d7413d728edb4623de403e03d119fe3699ee9
|
c8cf80842f9eebbf4569f3d67b9d8eff4ba405fa
|
refs/heads/master
| 2020-07-11T13:06:35.518331 | 2019-08-26T20:54:07 | 2019-08-26T20:54:07 | 204,546,868 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,052 |
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.14.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import argo
from argo.models.io_k8s_api_storage_v1beta1_csi_node_list import IoK8sApiStorageV1beta1CSINodeList  # noqa: E501
from argo.rest import ApiException
class TestIoK8sApiStorageV1beta1CSINodeList(unittest.TestCase):
"""IoK8sApiStorageV1beta1CSINodeList unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testIoK8sApiStorageV1beta1CSINodeList(self):
"""Test IoK8sApiStorageV1beta1CSINodeList"""
# FIXME: construct object with mandatory attributes with example values
# model = argo.models.io_k8s_api_storage_v1beta1_csi_node_list.IoK8sApiStorageV1beta1CSINodeList() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
c23ec8c67e4e0266f52cc21a90f42748f9f6b3d7
|
124cabad0cbf1e7249958d087d666231623444dc
|
/monkeys/post_image.py
|
d7005028aa1fd7ca8605a23c621b46a87f2eb57d
|
[] |
no_license
|
shish/code-portfolio
|
e7bfe0f2f8c357f124e942a4e836dc06f33bede2
|
a33d65011f26874f0626b4c9ae50affce36c407a
|
refs/heads/master
| 2023-07-07T14:12:07.883334 | 2023-06-21T11:00:54 | 2023-06-21T11:00:54 | 4,450,516 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,299 |
py
|
# tribes/civicboom/post_image.py
from tribes.civicboom import CivicboomMonkey
class PostImageMonkey(CivicboomMonkey):
def run(self):
self.log_in_as("unittest")
# create an article
response = self.post(
"/contents.json",
params={
'_authentication_token': self.auth_token,
'title': "Attachment test",
'type': "draft",
'content': "Media Incoming",
},
status=201
)
my_article_id = response.json["data"]["id"]
# upload an attachment
self.post(
"/contents/%d.json" % my_article_id,
params={
'_method': 'PUT',
'_authentication_token': self.auth_token,
'media_caption': "A random image",
'media_credit': "Test Monkey",
},
upload_files = [
("media_file", "landscape.png", self.generate_image((400, 300), 42))
],
)
# publish the article
self.post(
"/contents/%d.json" % my_article_id,
params={
'_authentication_token': self.auth_token,
'_method': 'PUT',
'type': "article",
}
)
|
[
"[email protected]"
] | |
ac519a5a420f5a5d46df514bc6e310ef24fdad7c
|
747255e913980d401341f164366a67d2a5c302af
|
/video_slomo.py
|
76cef827b46a4bc4d055fb691a9a5385d6cf90ce
|
[] |
no_license
|
zhaoyuzhi/Auto-Crop-Videos-and-Blur-Modelling
|
5365e5f4eea6521e2251ce41f57b6d30223b961d
|
345a67316483b1c2c40e63b0a43b87d6de410d51
|
refs/heads/master
| 2022-12-03T05:34:24.333430 | 2020-08-29T03:50:36 | 2020-08-29T03:50:36 | 255,800,343 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,939 |
py
|
import argparse
import os
import cv2
import numpy as np
import VideoFrameConversion as vfc
import SuperSloMo as vslomo
def get_files(path):
# read a folder, return the complete path
ret = []
for root, dirs, files in os.walk(path):
for filespath in files:
ret.append(os.path.join(root, filespath))
return ret
def get_jpgs(path):
# read a folder, return the image name
ret = []
for root, dirs, files in os.walk(path):
for filespath in files:
ret.append(filespath)
return ret
def text_save(content, filename, mode = 'a'):
    # save a list variable to a txt file
file = open(filename, mode)
for i in range(len(content)):
file.write(str(content[i]) + '\n')
file.close()
def check_path(path):
if not os.path.exists(path):
os.makedirs(path)
def get_statics(opt, time, fps):
interval_value = int(time / opt.interval_second)
print('Current center interval frames equal to:', interval_value)
interval_second_list = []
for i in range(interval_value):
this_interval_time = opt.interval_second * (i + 0.5)
interval_second_list.append(this_interval_time)
print('Time list:', interval_second_list)
interval_frame_list = []
for j, t in enumerate(interval_second_list):
this_interval_frame = int(t * fps)
interval_frame_list.append(this_interval_frame)
print('Frame list:', interval_frame_list)
return interval_frame_list
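# Worked example for get_statics (hypothetical numbers): with opt.interval_second = 10,
# a 60-second clip gives interval_value = 6 intervals centered at 5, 15, 25, 35, 45
# and 55 seconds; at fps = 30 the returned frame list is [150, 450, 750, 1050, 1350, 1650].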
def get_interp_video(opt):
print(opt.videopath)
fps, frames, time, width, height = vfc.get_video_info(opt.videopath)
fps = round(fps) * opt.exposure_type
width = opt.resize_w
height = opt.resize_h
print("corrected video fps =", fps)
print("corrected video width =", width)
print("corrected video height =", height)
# create a video writer
fourcc = cv2.VideoWriter_fourcc('m','p','4','v')
print('Saving folder:', opt.savepath)
check_path(opt.savepath)
savepath = os.path.join(opt.savepath, opt.videopath.split('/')[-1] + '_interp.mp4')
video = cv2.VideoWriter(savepath, fourcc, fps, (width, height))
# create Super Slomo network
interp, flow, back_warp = vslomo.create_slomonet(opt)
# read and write
vc = cv2.VideoCapture(opt.videopath)
# whether it is truely opened
if vc.isOpened():
rval, frame = vc.read()
else:
rval = False
print(rval)
# save frames
c = 1
while rval:
# interpolation
last_frame = frame # "last_frame" saves frame from last loop
last_frame = cv2.resize(last_frame, (width, height))
c = c + 1
cv2.waitKey(1)
rval, frame = vc.read() # "frame" saves frame of Current time
if frame is None:
frame = last_frame
frame = cv2.resize(frame, (width, height))
interp_frames = vslomo.save_inter_frames(last_frame, frame, opt, interp, flow, back_warp)
# write frames
video.write(last_frame)
        print('Original frame %d is saved' % (c - 1))
for k, interp_frame in enumerate(interp_frames):
video.write(interp_frame)
            print('Interpolated frames are saved %d times' % (k + 1))
# release the video
vc.release()
video.release()
cv2.destroyAllWindows()
print('Released!')
def get_interp_videos(opt):
videolist = get_files(opt.video_folder_path)[:11]
print(videolist)
for item, videopath in enumerate(videolist):
# video statics
fps, frames, time, width, height = vfc.get_video_info(videopath)
fps = round(fps) * opt.exposure_type
width = opt.resize_w
height = opt.resize_h
print("corrected video fps =", fps)
print("corrected video width =", width)
print("corrected video height =", height)
# create a video writer
fourcc = cv2.VideoWriter_fourcc('m','p','4','v')
print('Saving folder:', opt.savepath)
check_path(opt.savepath)
savepath = os.path.join(opt.savepath, videopath.split('/')[-1] + '_interp.mp4')
video = cv2.VideoWriter(savepath, fourcc, fps, (width, height))
# create Super Slomo network
interp, flow, back_warp = vslomo.create_slomonet(opt)
# read and write
vc = cv2.VideoCapture(videopath)
# whether it is truely opened
if vc.isOpened():
rval, frame = vc.read()
else:
rval = False
print(rval)
# save frames
c = 1
while rval:
# interpolation
last_frame = frame # "last_frame" saves frame from last loop
last_frame = cv2.resize(last_frame, (width, height))
c = c + 1
cv2.waitKey(1)
rval, frame = vc.read() # "frame" saves frame of Current time
if frame is None:
frame = last_frame
frame = cv2.resize(frame, (width, height))
interp_frames = vslomo.save_inter_frames(last_frame, frame, opt, interp, flow, back_warp)
# write frames
video.write(last_frame)
            print('This is the %d-th video. Original frame %d is saved' % (item + 1, c - 1))
for k, interp_frame in enumerate(interp_frames):
video.write(interp_frame)
                print('This is the %d-th video. Interpolated frames are saved %d times' % (item + 1, k + 1))
# release the video
vc.release()
video.release()
cv2.destroyAllWindows()
print('Released!')
if __name__ == "__main__":
# Define parameters
parser = argparse.ArgumentParser()
parser.add_argument('--interval_second', type = int, default = 10, help = 'interval of second')
parser.add_argument('--crop_range', type = int, default = 1, help = 'the time range (second) for true video clip')
parser.add_argument('--target_range', type = int, default = 1, help = 'the time range (second) for output video clip')
parser.add_argument('--exposure_type', type = int, default = 40, help = 'e.g. exposure_type=8 means exposure time 1/8 seconds')
parser.add_argument('--resize_w', type = int, default = 2560, help = 'resize_w') # 3840, 1920
parser.add_argument('--resize_h', type = int, default = 1440, help = 'resize_h') # 2160, 1080
parser.add_argument('--checkpoint_path', type = str, \
default = './SuperSloMo/SuperSloMo.ckpt', \
help = 'model weight path')
parser.add_argument('--videopath', type = str, \
default = 'F:\\SenseTime\\Quad-Bayer to RGB Mapping\\data\\video_original\\Moscow Russia Aerial Drone 5K Timelab.pro _ Москва Россия Аэросъемка-S_dfq9rFWAE.webm', \
help = 'video path')
# F:\\SenseTime\\Quad-Bayer to RGB Mapping\\data\\video_original\\Dubai in 4K - City of Gold-SLaYPmhse30.webm
parser.add_argument('--video_folder_path', type = str, \
default = 'E:\\Deblur\\data collection\\video_original', \
help = 'video folder path')
parser.add_argument('--savepath', type = str, \
default = 'E:\\Deblur\\data collection\\video_original_interp_by_superslomo', \
help = 'save path')
opt = parser.parse_args()
print(opt)
# General information of processing folder
videolist = get_jpgs(opt.video_folder_path)
for i in range(len(videolist)):
print(i, videolist[i])
videolist = get_files(opt.video_folder_path)
# Process videos
get_interp_videos(opt)
|
[
"[email protected]"
] | |
957f2c59a82039e7ca05cb449191376e312de5d4
|
56b47728ffe36878096fac0d8fb0deb94a8a9b7c
|
/SQLdb.py
|
7140ce7027f76ebad847674b3e3bf46a455fe87a
|
[] |
no_license
|
CaMeLCa5e/dailyspring2015
|
1a930fc74930bb7d286956f17fcf36ec48802b4e
|
1b2039b9908407a31e951e44f66bafebf3d7422b
|
refs/heads/master
| 2016-09-05T19:54:44.918992 | 2015-05-24T23:51:39 | 2015-05-24T23:51:39 | 33,795,537 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 463 |
py
|
#! usr/bin/python
import MySQLdb
db = MySQLdb.connect("localhost", "testuser", "test123", "TESTDB")
cursor = db.cursor()
cursor.execute("DROP TABLE IF EXISTS EMPLOYEE")
# cursor.execute("SELECT VERSION()")
# data = cursor.fetchone()
# print "Database version : %s" %data
# db.close
sql = """CREATE TABLE EMPLOYEE (
FIRST_NAME CHAR(20) NOT NULL,
LAST_NALE CHAR(20),
AGE INT,
SEX CHAR(1)
INCOME FLOAT )"""
cursor.execute(sql)
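# A follow-up insert could reuse the same cursor before db.close(); shown here as
# a commented-out sketch with made-up values, using the parameterized form so no
# manual string quoting is needed:
# cursor.execute(
#     "INSERT INTO EMPLOYEE (FIRST_NAME, LAST_NAME, AGE, SEX, INCOME) "
#     "VALUES (%s, %s, %s, %s, %s)",
#     ("Ada", "Lovelace", 36, "F", 1000.0))
# db.commit()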
db.close()
|
[
"[email protected]"
] | |
62da20225b5af908f0ff70e87fb3ad679eae1688
|
e52c7431f1b14444de52fd943a39fcaabeca21e4
|
/torch_geometric/sparse/__init__.py
|
faa61231fd6d50a6b07f253fe18fdf19e1b6117f
|
[] |
no_license
|
jwyang/pytorch_geometric
|
72d3a62f6991d90edb3b8da6445e18421f2174a8
|
31043b182248852768317a4185384390e95217d5
|
refs/heads/master
| 2021-08-30T16:16:03.613724 | 2017-12-18T15:52:08 | 2017-12-18T15:52:08 | 114,831,322 | 0 | 1 | null | 2017-12-20T02:02:54 | 2017-12-20T02:02:53 | null |
UTF-8
|
Python
| false | false | 228 |
py
|
from .sparse import SparseTensor
from .mm import mm
from .mm_diagonal import mm_diagonal
from .sum import sum
from .eye import eye
from .stack import stack
__all__ = ['SparseTensor', 'mm', 'mm_diagonal', 'sum', 'eye', 'stack']
|
[
"[email protected]"
] | |
627d1e6a2cdc3cf718162c2da7f7045a0cc2c408
|
7978cf6a612816b97beeb34e4ccc4a3f68c44767
|
/1/1_2.py
|
2b44561d8277813145276f3ac86f8525dc54c6aa
|
[] |
no_license
|
nemesmarci/Advent-of-Code-2018
|
13e9acd01b019ef0e890f0472c0c316a17dd60be
|
47dfac4afa69636428b722eb96fba2596bf8368c
|
refs/heads/master
| 2022-01-01T09:47:46.652193 | 2019-12-10T23:28:36 | 2021-12-29T19:48:02 | 159,982,537 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 315 |
py
|
with open('input.txt') as data:
lines = data.readlines()
frequency = 0
frequencies = set()
found = False
while not found:
for line in lines:
frequencies.add(frequency)
frequency += int(line)
if frequency in frequencies:
found = True
break
print(frequency)
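# Worked example: for the two input lines +1 and -1 the running frequency repeats
# as 0 -> 1 -> 0, so 0 is the first value reached twice and gets printed.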
|
[
"[email protected]"
] | |
c8da1b8b4b7af25bda9804933bb7b2f7157e54c2
|
06a7dc7cc93d019e4a9cbcf672b23a0bbacf8e8b
|
/2016_schizConnect/supervised_analysis/all_studies+VIP/all_subjects/VBM/03_svm_centered_by_site.py
|
b562acd6e58bac7146ddd232c6f5774867785958
|
[] |
no_license
|
neurospin/scripts
|
6c06cd218a5f32de9c3c2b7d1d8bda3f3d107458
|
f14a2c9cf2cd7f5fbea767b017c3faf36d170bdb
|
refs/heads/master
| 2021-07-11T22:55:46.567791 | 2021-07-02T13:08:02 | 2021-07-02T13:08:02 | 10,549,286 | 2 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,397 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 23 10:04:13 2017
@author: ad247405
"""
import os
import json
import numpy as np
from sklearn.cross_validation import StratifiedKFold
from sklearn.metrics import precision_recall_fscore_support
from scipy.stats import binom_test
from collections import OrderedDict
from sklearn import preprocessing
from sklearn.metrics import roc_auc_score
from sklearn import svm
import pandas as pd
import shutil
WD = '/neurospin/brainomics/2016_schizConnect/analysis/all_studies+VIP/VBM/all_subjects/results/with_preserved_ratios/svm_centered_by_site_all'
def config_filename(): return os.path.join(WD,"config_dCV.json")
def results_filename(): return os.path.join(WD,"results_dCV.xlsx")
#############################################################################
def load_globals(config):
import mapreduce as GLOBAL # access to global variables
GLOBAL.DATA = GLOBAL.load_data(config["data"])
def resample(config, resample_nb):
import mapreduce as GLOBAL # access to global variables
GLOBAL.DATA = GLOBAL.load_data(config["data"])
resample = config["resample"][resample_nb]
GLOBAL.DATA_RESAMPLED = {k: [GLOBAL.DATA[k][idx, ...] for idx in resample]
for k in GLOBAL.DATA}
def mapper(key, output_collector):
import mapreduce as GLOBAL
Xtr = GLOBAL.DATA_RESAMPLED["X"][0]
Xte = GLOBAL.DATA_RESAMPLED["X"][1]
ytr = GLOBAL.DATA_RESAMPLED["y"][0]
yte = GLOBAL.DATA_RESAMPLED["y"][1]
c = float(key[0])
print("c:%f" % (c))
class_weight='balanced' # unbiased
mask = np.ones(Xtr.shape[0], dtype=bool)
scaler = preprocessing.StandardScaler().fit(Xtr)
Xtr = scaler.transform(Xtr)
Xte=scaler.transform(Xte)
mod = svm.LinearSVC(C=c,fit_intercept=False,class_weight= class_weight)
mod.fit(Xtr, ytr.ravel())
y_pred = mod.predict(Xte)
y_proba_pred = mod.decision_function(Xte)
ret = dict(y_pred=y_pred, y_true=yte,prob_pred = y_proba_pred, beta=mod.coef_, mask=mask)
if output_collector:
output_collector.collect(key, ret)
else:
return ret
def scores(key, paths, config):
import mapreduce
print (key)
values = [mapreduce.OutputCollector(p) for p in paths]
values = [item.load() for item in values]
y_true = [item["y_true"].ravel() for item in values]
y_pred = [item["y_pred"].ravel() for item in values]
y_true = np.concatenate(y_true)
y_pred = np.concatenate(y_pred)
prob_pred = [item["prob_pred"].ravel() for item in values]
prob_pred = np.concatenate(prob_pred)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
auc = roc_auc_score(y_true, prob_pred) #area under curve score.
#betas = np.hstack([item["beta"] for item in values]).T
# threshold betas to compute fleiss_kappa and DICE
#betas_t = np.vstack([array_utils.arr_threshold_from_norm2_ratio(betas[i, :], .99)[0] for i in range(betas.shape[0])])
#Compute pvalue
success = r * s
success = success.astype('int')
prob_class1 = np.count_nonzero(y_true) / float(len(y_true))
pvalue_recall0_true_prob = binom_test(success[0], s[0], 1 - prob_class1,alternative = 'greater')
pvalue_recall1_true_prob = binom_test(success[1], s[1], prob_class1,alternative = 'greater')
    pvalue_recall0_unknown_prob = binom_test(success[0], s[0], 0.5,alternative = 'greater')
pvalue_recall1_unknown_prob = binom_test(success[1], s[1], 0.5,alternative = 'greater')
pvalue_recall_mean = binom_test(success[0]+success[1], s[0] + s[1], p=0.5,alternative = 'greater')
scores = OrderedDict()
try:
        a, l1, l2, tv = [float(par) for par in key.split("_")]
scores['a'] = a
scores['l1'] = l1
scores['l2'] = l2
scores['tv'] = tv
left = float(1 - tv)
if left == 0: left = 1.
scores['l1_ratio'] = float(l1) / left
except:
pass
scores['recall_0'] = r[0]
scores['recall_1'] = r[1]
scores['recall_mean'] = r.mean()
scores["auc"] = auc
scores['pvalue_recall0_true_prob_one_sided'] = pvalue_recall0_true_prob
scores['pvalue_recall1_true_prob_one_sided'] = pvalue_recall1_true_prob
    scores['pvalue_recall0_unknown_prob_one_sided'] = pvalue_recall0_unknown_prob
scores['pvalue_recall1_unknown_prob_one_sided'] = pvalue_recall1_unknown_prob
scores['pvalue_recall_mean'] = pvalue_recall_mean
#scores['prop_non_zeros_mean'] = float(np.count_nonzero(betas_t)) / \
# float(np.prod(betas.shape))
scores['param_key'] = key
return scores
def reducer(key, values):
import os, glob, pandas as pd
os.chdir(os.path.dirname(config_filename()))
config = json.load(open(config_filename()))
paths = glob.glob(os.path.join(config['map_output'], "*", "*", "*"))
#paths = [p for p in paths if not p.count("0.8_-1")]
def close(vec, val, tol=1e-4):
return np.abs(vec - val) < tol
def groupby_paths(paths, pos):
groups = {g:[] for g in set([p.split("/")[pos] for p in paths])}
for p in paths:
groups[p.split("/")[pos]].append(p)
return groups
def argmaxscore_bygroup(data, groupby='fold', param_key="param_key", score="recall_mean"):
arg_max_byfold = list()
for fold, data_fold in data.groupby(groupby):
assert len(data_fold) == len(set(data_fold[param_key])) # ensure all param are diff
arg_max_byfold.append([fold, data_fold.ix[data_fold[score].argmax()][param_key], data_fold[score].max()])
return pd.DataFrame(arg_max_byfold, columns=[groupby, param_key, score])
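    # Illustrative result (hypothetical values): with 5 outer folds this returns a
    # 5-row frame such as [['cv00', '0.01', 0.71], ...], i.e. the winning C
    # (param_key) and its inner-CV recall_mean for each fold.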
print('## Refit scores')
print('## ------------')
byparams = groupby_paths([p for p in paths if p.count("all") and not p.count("all/all")],3)
byparams_scores = {k:scores(k, v, config) for k, v in byparams.items()}
data = [list(byparams_scores[k].values()) for k in byparams_scores]
columns = list(byparams_scores[list(byparams_scores.keys())[0]].keys())
scores_refit = pd.DataFrame(data, columns=columns)
print('## doublecv scores by outer-cv and by params')
print('## -----------------------------------------')
data = list()
bycv = groupby_paths([p for p in paths if p.count("cvnested")],1)
for fold, paths_fold in bycv.items():
print(fold)
byparams = groupby_paths([p for p in paths_fold], 3)
byparams_scores = {k:scores(k, v, config) for k, v in byparams.items()}
data += [[fold] + list(byparams_scores[k].values()) for k in byparams_scores]
scores_dcv_byparams = pd.DataFrame(data, columns=["fold"] + columns)
print('## Model selection')
print('## ---------------')
svm = argmaxscore_bygroup(scores_dcv_byparams); svm["method"] = "svm"
scores_argmax_byfold = svm
print('## Apply best model on refited')
print('## ---------------------------')
scores_svm = scores("nestedcv", [os.path.join(config['map_output'], row["fold"], "all", row["param_key"]) for index, row in svm.iterrows()], config)
scores_cv = pd.DataFrame([["svm"] + list(scores_svm.values())], columns=["method"] + list(scores_svm.keys()))
with pd.ExcelWriter(results_filename()) as writer:
scores_refit.to_excel(writer, sheet_name='cv_by_param', index=False)
scores_dcv_byparams.to_excel(writer, sheet_name='cv_cv_byparam', index=False)
scores_argmax_byfold.to_excel(writer, sheet_name='cv_argmax', index=False)
scores_cv.to_excel(writer, sheet_name='dcv', index=False)
##############################################################################
if __name__ == "__main__":
WD = '/neurospin/brainomics/2016_schizConnect/analysis/all_studies+VIP/VBM/all_subjects/results/with_preserved_ratios/svm_centered_by_site_all'
INPUT_DATA_X = '/neurospin/brainomics/2016_schizConnect/analysis/all_studies+VIP/VBM/all_subjects/data/mean_centered_by_site_all/X.npy'
INPUT_DATA_y = '/neurospin/brainomics/2016_schizConnect/analysis/all_studies+VIP/VBM/all_subjects/data/mean_centered_by_site_all/y.npy'
INPUT_MASK_PATH = '/neurospin/brainomics/2016_schizConnect/analysis/all_studies+VIP/VBM/all_subjects/data/mean_centered_by_site_all/mask.nii'
NFOLDS_OUTER = 5
NFOLDS_INNER = 5
shutil.copy(INPUT_DATA_X, WD)
shutil.copy(INPUT_DATA_y, WD)
shutil.copy(INPUT_MASK_PATH, WD)
#############################################################################
## Create config file
y = np.load(INPUT_DATA_y)
fold1 = np.load("/neurospin/brainomics/2016_schizConnect/analysis/all_studies+VIP/VBM/all_subjects/data/fold_stratified/fold1.npy")
fold2 = np.load("/neurospin/brainomics/2016_schizConnect/analysis/all_studies+VIP/VBM/all_subjects/data/fold_stratified/fold2.npy")
fold3 = np.load("/neurospin/brainomics/2016_schizConnect/analysis/all_studies+VIP/VBM/all_subjects/data/fold_stratified/fold3.npy")
fold4 = np.load("/neurospin/brainomics/2016_schizConnect/analysis/all_studies+VIP/VBM/all_subjects/data/fold_stratified/fold4.npy")
fold5 = np.load("/neurospin/brainomics/2016_schizConnect/analysis/all_studies+VIP/VBM/all_subjects/data/fold_stratified/fold5.npy")
## Create config file
cv_outer = [[tr, te] for tr,te in StratifiedKFold(y.ravel(), n_folds=NFOLDS_OUTER, random_state=42)]
cv_outer[0][0] = np.concatenate((fold2,fold3,fold4,fold5))
cv_outer[0][1] = fold1
cv_outer[1][0] = np.concatenate((fold1,fold3,fold4,fold5))
cv_outer[1][1] = fold2
cv_outer[2][0] = np.concatenate((fold1,fold2,fold4,fold5))
cv_outer[2][1] = fold3
cv_outer[3][0] = np.concatenate((fold1,fold2,fold3,fold5))
cv_outer[3][1] = fold4
cv_outer[4][0] = np.concatenate((fold1,fold2,fold3,fold4))
cv_outer[4][1] = fold5
#
import collections
cv = collections.OrderedDict()
for cv_outer_i, (tr_val, te) in enumerate(cv_outer):
cv["cv%02d/all" % (cv_outer_i)] = [tr_val, te]
cv_inner = StratifiedKFold(y[tr_val].ravel(), n_folds=NFOLDS_INNER, random_state=42)
for cv_inner_i, (tr, val) in enumerate(cv_inner):
cv["cv%02d/cvnested%02d" % ((cv_outer_i), cv_inner_i)] = [tr_val[tr], tr_val[val]]
for k in cv:
cv[k] = [cv[k][0].tolist(), cv[k][1].tolist()]
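    # The fold index arrays are converted to plain Python lists above because
    # json.dump below cannot serialize numpy arrays.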
print(list(cv.keys()))
C_range = [[100],[10],[1],[1e-1],[1e-2],[1e-3],[1e-4],[1e-5],[1e-6],[1e-7],[1e-8],[1e-9]]
user_func_filename = "/home/ad247405/git/scripts/2016_schizConnect/supervised_analysis/all_studies+VIP/all_subjects/VBM/03_svm_centered_by_site.py"
config = dict(data=dict(X="X.npy", y="y.npy"),
params=C_range, resample=cv,
structure="mask.nii",
map_output="model_selectionCV",
user_func=user_func_filename,
reduce_input="results/*/*",
reduce_group_by="params",
reduce_output="model_selectionCV.csv")
json.dump(config, open(os.path.join(WD, "config_dCV.json"), "w"))
# Build utils files: sync (push/pull) and PBS
import brainomics.cluster_gabriel as clust_utils
sync_push_filename, sync_pull_filename, WD_CLUSTER = \
clust_utils.gabriel_make_sync_data_files(WD)
cmd = "mapreduce.py --map %s/config_dCV.json" % WD_CLUSTER
clust_utils.gabriel_make_qsub_job_files(WD, cmd,walltime = "250:00:00")
|
[
"[email protected]"
] | |
26fabfda61115811b13d95b272a0c78d93ef5adb
|
2ca91d379b291a4e7f5e804a63bb43f8bf316adf
|
/transmutator/orchestration.py
|
4ce766f5a01531d50647193e7696e42afa9455f4
|
[
"BSD-3-Clause"
] |
permissive
|
benoitbryon/transmutator
|
918146ebfdd67ca67ac7f97715f8d59d745c32da
|
865a275a601cd735a131a58576aa12c68510b644
|
refs/heads/master
| 2021-01-17T13:21:39.027197 | 2015-06-24T10:30:58 | 2015-06-24T10:30:58 | 12,803,979 | 0 | 1 | null | 2016-06-27T21:40:38 | 2013-09-13T07:30:30 |
Python
|
UTF-8
|
Python
| false | false | 7,563 |
py
|
import os
import shutil
from xal.session.local import LocalSession
class Orchestrator(object):
def __init__(self):
root_dir = os.path.abspath(os.getcwd())
self.mutations_dir = os.path.join(root_dir, 'mutations')
self.working_dir = os.path.join(root_dir, 'var', 'transmutator')
if not os.path.isdir(self.working_dir):
os.makedirs(self.working_dir)
self.todo_dir = os.path.join(self.working_dir, 'todo')
if not os.path.isdir(self.todo_dir):
os.makedirs(self.todo_dir)
self.doing_dir = os.path.join(self.working_dir, 'doing')
if not os.path.isdir(self.doing_dir):
os.makedirs(self.doing_dir)
self.done_dir = os.path.join(self.working_dir, 'done')
if not os.path.isdir(self.done_dir):
os.makedirs(self.done_dir)
def mutation_sourcefile(self, mutation):
"""Return absolute filename to mutation."""
return os.path.join(self.mutations_dir, mutation)
def is_mutation(self, mutation):
"""Return ``True`` if ``mutation`` is path to an executable file."""
return os.access(self.mutation_sourcefile(mutation), os.X_OK)
def is_done(self, mutation):
"""Return ``True`` if ``mutation`` has already been performed."""
return os.path.isfile(os.path.join(self.done_dir, mutation))
def is_new(self, mutation):
"""Return ``True`` if ``mutation`` has not been performed yet."""
return not os.path.exists(os.path.join(self.done_dir, mutation))
def is_recurrent(self, mutation):
"""Return ``True`` if ``mutation`` has to be performed on every run.
On forward, recurrent mutations are not skipped, they go forward.
"""
return mutation.startswith('recurrent/')
def is_in_development(self, mutation):
"""Return ``True`` if ``mutation`` is in development.
On forward, in-development mutations go backward and forward.
"""
return mutation.startswith('development')
def collect_mutations(self):
"""Iterates over all available mutations, whatever their status.
The return iterator is not sorted.
"""
for (dirpath, dirnames, filenames) in os.walk(self.mutations_dir):
for filename in filenames:
relative_dirname = dirpath[len(self.mutations_dir):]
relative_dirname = relative_dirname.lstrip(os.path.sep)
relative_filename = os.path.join(relative_dirname, filename)
yield relative_filename
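        # Example (hypothetical layout): with mutations/0001_init.sh and
        # mutations/recurrent/reindex.sh on disk, this yields the relative names
        # '0001_init.sh' and 'recurrent/reindex.sh' (order not guaranteed).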
def register_mutation(self, mutation):
"""Register mutation as TODO or DONE."""
todo = self.is_new(mutation) or \
self.is_in_development(mutation) or \
self.is_recurrent(mutation)
if todo:
dest = os.path.join(self.todo_dir, mutation)
if not os.path.isdir(os.path.dirname(dest)):
os.makedirs(os.path.dirname(dest))
shutil.copy2(os.path.join(self.mutations_dir, mutation), dest)
def start_mutation(self, mutation):
"""Mark mutation from TODO to DOING.:"""
todo = os.path.join(self.todo_dir, mutation)
todo_dir = os.path.dirname(todo)
doing = os.path.join(self.doing_dir, mutation)
if not os.path.isdir(os.path.dirname(doing)):
os.makedirs(os.path.dirname(doing))
if self.is_recurrent(mutation):
shutil.copy2(todo, doing)
else:
shutil.move(todo, doing)
if todo_dir != self.todo_dir and not os.listdir(todo_dir):
shutil.rmtree(todo_dir)
def todo_releases(self):
"""Return ordered list of releases to process."""
releases = []
noname_release = False
development_release = False
for name in os.listdir(self.todo_dir):
if os.path.isdir(os.path.join(self.todo_dir, name)):
if name == 'development':
development_release = True
elif name == 'recurrent':
pass
else:
releases.append(name)
else:
noname_release = True
releases.sort()
if noname_release:
releases.insert(0, '')
if development_release:
releases.append('development')
return releases
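        # Example (hypothetical todo contents): release directories '1.0' and '2.0',
        # a 'development' directory and a loose top-level file would produce
        # ['', '1.0', '2.0', 'development']; the unnamed release runs first and
        # 'development' always runs last.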
def todo_recurrent(self):
"""Return ordered list of recurrent mutations."""
files = os.listdir(os.path.join(self.todo_dir, 'recurrent'))
files.sort()
return [os.path.join('recurrent', name) for name in files]
def todo_mutations(self, release):
files = []
recurrent_mutations = self.todo_recurrent()
absolute_release = os.path.join(self.todo_dir, release)
for filename in os.listdir(absolute_release):
if os.path.isfile(os.path.join(absolute_release, filename)):
relative_filename = os.path.join(release, filename)
files.append((filename, relative_filename))
for recurrent in recurrent_mutations:
files.append((recurrent[len('recurrent/'):], recurrent))
files.sort()
files = [mutation for f, mutation in files]
return files
def forward_mutation(self, mutation):
print('## FORWARD mutation "{name}"'.format(name=mutation))
session = LocalSession()
sh = session.sh
result = sh.run(os.path.join(self.doing_dir, mutation))
print(result.stdout)
def backward_mutation(self, mutation):
print('## BACKWARD mutation "{name}"'.format(name=mutation))
session = LocalSession()
sh = session.sh
result = sh.run([
os.path.join(self.doing_dir, mutation),
'--backward'])
print(result.stdout)
def run_mutation(self, mutation):
do_backward = (self.is_done(mutation)
and self.is_in_development(mutation))
do_forward = True
if do_backward:
self.backward_mutation(mutation)
if do_forward:
self.forward_mutation(mutation)
def success_mutation(self, mutation):
"""Mark mutation as DONE.:"""
doing = os.path.join(self.doing_dir, mutation)
doing_dir = os.path.dirname(doing)
done = os.path.join(self.done_dir, mutation)
if not os.path.isdir(os.path.dirname(done)):
os.makedirs(os.path.dirname(done))
if not self.is_recurrent(mutation):
shutil.move(doing, done)
if doing_dir != self.doing_dir and not os.listdir(doing_dir):
shutil.rmtree(doing_dir)
def error_mutation(self, mutation):
"""Register error and warn user."""
print('ERROR with mutation "{name}"'.format(name=mutation))
def run_mutations(self):
for mutation in self.collect_mutations():
self.register_mutation(mutation)
for release in self.todo_releases():
print('#### Processing release "{name}" ####'.format(name=release))
for mutation in self.todo_mutations(release):
self.start_mutation(mutation)
try:
self.run_mutation(mutation)
except:
self.error_mutation(mutation)
else:
self.success_mutation(mutation)
recurrent_dir = os.path.join(self.todo_dir, 'recurrent')
if os.path.exists(recurrent_dir) and os.listdir(recurrent_dir):
shutil.rmtree(recurrent_dir)
|
[
"[email protected]"
] | |
0b281d03ea9a0a92a7cdb82652e65812e7c55bce
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2145/60708/269546.py
|
04f455752d5ae779e8aa18a822389bff6d25d85e
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 336 |
py
|
N = int(input())
for n in range(0, N):
    temp = input().split(" ")
    l = int(input())
    heights = []
    for item in temp:
        heights.append(int(item))
    maxresult = 0
    for x in range(1, l + 1):
        for y in range(0, l - x + 1):
            h = min(heights[y:y + x])
            if h * x > maxresult:
                maxresult = h * x
    print(maxresult)
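# Worked example: for heights [2, 1, 2] (l = 3) the scan checks every window and the
# best rectangle is width 3 * min height 1 = 3. Including the min() over each slice
# this brute force is roughly O(l^3); a monotonic-stack solution would run in O(l).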
|
[
"[email protected]"
] | |
f42c0bc6db94794cbf3dbc31077f0801d2b140d3
|
804ce3c2897a8720a27e0d86ac3b868ebd41cd20
|
/project-data/django/mango/mango/wsgi.py
|
518142e6201c5cda775ff0c78d6761836370bc36
|
[] |
no_license
|
hoboland21/mango
|
383359aa85b685bfe77c6336974600038454cf80
|
be8bf3398612a0c3dbb4498eb5eb18407c574ce3
|
refs/heads/master
| 2023-07-13T06:25:39.508434 | 2021-08-25T03:25:37 | 2021-08-25T03:25:37 | 399,520,705 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 491 |
py
|
import sys,os
"""
WSGI config for main project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
#if "/usr/local/django/mango" not in sys.path :
# sys.path.insert(0,"/usr/local/django/mango")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mango.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
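# Deployment sketch (assuming gunicorn is installed; any WSGI server works):
# gunicorn mango.wsgi:application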
|
[
"[email protected]"
] | |
aa6d701c19dc52cbb0c3abdfa5fa1970d39343be
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2482/60829/280705.py
|
5973613bdc5d7fe19ce40088faaa5a99d02f2080
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 188 |
py
|
n=int(input())
for p in range(n):
a=int(input())
b=int(input())
    k=a/b
    if k==1.6666666666666667:
        k="1.(6)"
    if k==2.6666666666666665:
        k="2.(6)"
    print(k)
|
[
"[email protected]"
] | |
0ee978c945a22cfd723c0a2e287d0e327ea507df
|
48fcd5b9203c5f34dcad9483259c0f3d46f5d48b
|
/codeacademy-python3/files/how_many_lines.py
|
79a082fa607ebffa16e832bc8a67fed867241a6f
|
[] |
no_license
|
ssaulrj/codes-python
|
438dd691815d0a688d264928eb07187ba30c2138
|
04b75b001de60a5e202ad373f3379864753ce203
|
refs/heads/master
| 2022-11-17T11:40:18.883096 | 2020-07-06T00:57:58 | 2020-07-06T00:57:58 | 234,440,220 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 152 |
py
|
with open('how_many_lines.txt') as lines_doc:
#lines_docx = lines_doc.read()
for line in lines_doc.readlines():
print(line)
#print(lines_docx)
|
[
"[email protected]"
] | |
ec62c1d46aabfd5b1918edd414451252d7613fff
|
f8eea4a4cc079ba830a27a2ce239aef451ed6597
|
/test/ec/test_model.py
|
b9d4383639a0120de5a832c55c6614b5050ba089
|
[
"MIT"
] |
permissive
|
qalmaqihir/pyecsca
|
f37a32a00ea47fff1db0d5bb42b28df7cce6b587
|
28546dad01a25ce101d6b49924f521c2ef5ffa98
|
refs/heads/master
| 2023-02-18T19:59:11.612457 | 2021-01-22T16:02:25 | 2021-01-23T00:15:27 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 498 |
py
|
from unittest import TestCase
from pyecsca.ec.model import (ShortWeierstrassModel, MontgomeryModel, EdwardsModel,
TwistedEdwardsModel)
class CurveModelTests(TestCase):
def test_load(self):
self.assertGreater(len(ShortWeierstrassModel().coordinates), 0)
self.assertGreater(len(MontgomeryModel().coordinates), 0)
self.assertGreater(len(EdwardsModel().coordinates), 0)
self.assertGreater(len(TwistedEdwardsModel().coordinates), 0)
|
[
"[email protected]"
] | |
f2f95abfa48576405b22de0fe042f561eb265d28
|
c8453f83242cd525a98606f665d9f5d9e84c6335
|
/lib/surface/container/images/list_tags.py
|
31d68c01369d00dbd98fec0fd6289bf87e7c0617
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
paulfoley/GCP-Cloud_SDK
|
5188a04d8d80a2709fa3dba799802d57c7eb66a1
|
bec7106686e99257cb91a50f2c1b1a374a4fc66f
|
refs/heads/master
| 2021-06-02T09:49:48.309328 | 2017-07-02T18:26:47 | 2017-07-02T18:26:47 | 96,041,222 | 1 | 1 |
NOASSERTION
| 2020-07-26T22:40:49 | 2017-07-02T18:19:52 |
Python
|
UTF-8
|
Python
| false | false | 3,412 |
py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""List tags command."""
import argparse
from containerregistry.client.v2_2 import docker_http
from containerregistry.client.v2_2 import docker_image
from googlecloudsdk.api_lib.container.images import util
from googlecloudsdk.calliope import base
from googlecloudsdk.core import http
# Add to this as we add columns.
_DEFAULT_KINDS = [
'BUILD_DETAILS',
'IMAGE_BASIS',
'PACKAGE_VULNERABILITY',
]
class ListTags(base.ListCommand):
"""List tags and digests for the specified image."""
detailed_help = {
'DESCRIPTION':
"""\
The container images list-tags command of gcloud lists metadata about
tags and digests for the specified container image. Images must be
hosted by the Google Container Registry.
""",
'EXAMPLES':
"""\
List the tags in a specified image:
$ {{command}} gcr.io/myproject/myimage
""",
}
def Collection(self):
return 'container.tags'
@staticmethod
def Args(parser):
"""Register flags for this command.
Args:
parser: An argparse.ArgumentParser-like object. It is mocked out in order
to capture some information, but behaves like an ArgumentParser.
"""
parser.add_argument(
'--show-occurrences',
action='store_true',
default=False,
help=argparse.SUPPRESS)
parser.add_argument(
'--occurrence-filter',
default=' OR '.join(
['kind = "{kind}"'.format(kind=x) for x in _DEFAULT_KINDS]),
help=argparse.SUPPRESS)
parser.add_argument(
'image',
help='The name of the image. Format: *.gcr.io/repository/image')
# Does nothing for us, included in base.ListCommand
base.URI_FLAG.RemoveFromParser(parser)
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Raises:
InvalidImageNameError: If the user specified an invalid image name.
Returns:
Some value that we want to have printed later.
"""
repository = util.ValidateRepositoryPath(args.image)
http_obj = http.Http()
with docker_image.FromRegistry(
basic_creds=util.CredentialProvider(),
name=repository,
transport=http_obj) as image:
try:
return util.TransformManifests(
image.manifests(),
repository,
show_occurrences=args.show_occurrences,
occurrence_filter=args.occurrence_filter)
except docker_http.V2DiagnosticException as err:
raise util.GcloudifyRecoverableV2Errors(err, {
403: 'Access denied: {0}'.format(repository),
404: 'Not found: {0}'.format(repository)
})
|
[
"[email protected]"
] | |
eb100eed015d6d6c69d5645791a5c9cc4b19b5cd
|
6114a1313ca1193343fac049d0f3cf9e15438829
|
/Chap0/project/guess.py
|
d1d399e95d5685e53d126aa80a8656a4ac77bad9
|
[] |
no_license
|
AIHackerTest/Hansoluo_Py101-004
|
0d49bb12158d2d6f8c430c407d739336de7d0ef3
|
1bb2d1810ec286e16cf12165e75472edd7c5d29a
|
refs/heads/master
| 2021-06-28T01:54:57.478192 | 2017-09-12T08:23:02 | 2017-09-12T08:23:02 | 103,240,275 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 648 |
py
|
# -*- coding: utf-8 -*-
# Description:
# The program picks a random number within 20 and gives the user 10 chances to guess it.
# Based on the input, the program prints a hint (too high, too low, or correct).
# The game ends on a correct guess or once all 10 chances are used.
import random
# random.randint(a, b):Return a random integer N such that a <= N <= b
a = random.randint(1,20)
for i in range(1,11):
b = int(input("请猜测20以内的数字:"))
if a > b:
print("小了")
elif a < b:
print("大了")
else:
print("正确")
break
print("你还有 {0} 次机会".format(10-i))
i += 1
print ('游戏结束')
|
[
"[email protected]"
] | |
79a6440763200a87a0690bc6149639063b1f6735
|
4fccfbe5c9cdf595dd3bdb182a6214f593bfd808
|
/nubia/internal/completion.py
|
f205db47f9a60830fd4226a834539959b0d47836
|
[
"BSD-3-Clause",
"Python-2.0"
] |
permissive
|
MountakBernotas/AzurePython
|
a3838207cb7ed9f3cd5e5b34aadd0a4eb6ca985d
|
5f34b13be8aefc08c491eebaf1692b75b813e1f5
|
refs/heads/master
| 2020-03-28T16:25:30.226681 | 2018-09-13T20:37:30 | 2018-09-13T20:37:30 | 148,693,461 | 0 | 1 |
NOASSERTION
| 2018-12-13T13:33:54 | 2018-09-13T20:22:54 |
Python
|
UTF-8
|
Python
| false | false | 10,195 |
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
import itertools
import pyparsing as pp
from nubia.internal.helpers import function_to_str
from typing import Iterable, TYPE_CHECKING
from nubia.internal import parser
from prompt_toolkit.document import Document
from prompt_toolkit.completion import CompleteEvent
from prompt_toolkit.completion import Completion
if TYPE_CHECKING:
from nubia.internal.cmdbase import AutoCommand # noqa
class TokenParse:
"""
This class captures an interactive shell token that cannot be fully parser
by the interactive shell parser and analyze it.
"""
def __init__(self, token: str) -> None:
self._token = token
self._key = ""
self._is_argument = False
self._is_list = False
self._is_dict = False
self._last_value = ""
self.parse()
def parse(self):
key, delim, value = self._token.partition("=")
# Is everything before the = sane?
if any(x in key for x in "[]{}\"'"):
# We will treat this as positional in this case
return
# This is key=value
if delim == "=":
self._is_argument = True
self._key = key
else:
            # This is positional, the value is the key
            value = key
            assert len(self._key) == 0
if len(value) > 0:
# Let's parse the value, is it a single, list, dict?
if value[0] == "[":
self._is_list = True
value = value.strip("[")
list_values = value.rpartition(",")
self._last_value = list_values[len(list_values) - 1].lstrip()
elif value[0] == "{":
self._is_dict = True
else:
self._last_value = value
@property
def is_argument(self) -> bool:
return self._is_argument
@property
def is_positional(self) -> bool:
return not self._is_argument
# Talks about the type of the value
@property
def is_list(self) -> bool:
return self._is_list
@property
def is_dict(self) -> bool:
return self._is_dict
@property
def argument_name(self) -> str:
assert self._is_argument
return self._key
def keys(self) -> Iterable[str]:
return []
def values(self) -> Iterable[str]:
return []
@property
def last_value(self) -> str:
return self._last_value
@property
def is_single_value(self) -> bool:
return not (self._is_dict or self._is_list)
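# Illustrative behaviour (added example, not part of the original module):
#   TokenParse("hosts=[a, b")  -> is_argument=True, is_list=True, last_value="b"
#   TokenParse("verbose")      -> is_positional=True (no "=" in the token)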
class AutoCommandCompletion:
"""
This is the interactive completion state machine, it tracks the
parsed tokens out of a command input and builds a data model that is
used to understand what would be the next natural completion
token(s).
"""
def __init__(
self,
cmd_obj: "AutoCommand",
document: Document,
complete_event: CompleteEvent,
) -> None:
self.doc = document
self.cmd = cmd_obj
self.meta = self.cmd.metadata
self.event = complete_event
# current state
def get_completions(self) -> Iterable[Completion]:
"""
Returns a
"""
logger = logging.getLogger(f"{type(self).__name__}.get_completions")
remaining = None
try:
parsed = parser.parse(
self.doc.text, expect_subcommand=self.cmd.super_command
)
except parser.CommandParseError as e:
parsed = e.partial_result
remaining = e.remaining
# This is a funky but reliable way to figure that last token we are
# interested in manually parsing, This will return the last key=value
# including if the value is a 'value', [list], or {dict} or combination
# of these. This also matches positional arguments.
if self.doc.char_before_cursor in " ]}":
last_token = ""
else:
last_space = (
self.doc.find_backwards(" ", in_current_line=True) or -1
)
last_token = self.doc.text[(last_space + 1) :] # noqa
# We pick the bigger match here. The reason we want to look into
# remaining is to capture the state that we are in an open list,
# dictionary, or any other value that may have spaces in it but fails
# parsing (yet).
if remaining and len(remaining) > len(last_token):
last_token = remaining
try:
return self._prepare_args_completions(
parsed_command=parsed, last_token=last_token
)
except Exception as e:
logger.exception(str(e))
return []
def _prepare_args_completions(
self, parsed_command: pp.ParseResults, last_token
) -> Iterable[Completion]:
assert parsed_command is not None
args_meta = self.meta.arguments.values()
# are we expecting a sub command?
if self.cmd.super_command:
# We have a sub-command (supposedly)
subcommand = parsed_command.get("__subcommand__")
assert subcommand
sub_meta = self.cmd.subcommand_metadata(subcommand)
if not sub_meta:
logging.debug("Parsing unknown sub-command failed!")
return []
# we did find the sub-command, yay!
# In this case we chain the arguments from super and the
# sub-command together
args_meta = itertools.chain(args_meta, sub_meta.arguments.values())
# Now let's see if we can figure which argument we are talking about
args_meta = self._filter_arguments_by_prefix(last_token, args_meta)
# Which arguments did we fully parse already? let's avoid printing them
# in completions
parsed_keys = parsed_command.asDict().get("kv", [])
# We are either completing an argument name, argument value, or
# positional value.
# Dissect the last_token and figure what is the right completion
parsed_token = TokenParse(last_token)
if parsed_token.is_positional:
# TODO: Handle positional argument completions too
# To figure which positional we are in right now, we need to run the
# same logic that figures if all required arguments has been
# supplied and how many positionals have been processed and which
# one is next.
# This code is already in cmdbase.py run_interactive but needs to be
# refactored to be reusable here.
pass
elif parsed_token.is_argument:
argument_name = parsed_token.argument_name
arg = self._find_argument_by_name(argument_name)
if not arg or arg.choices in [False, None]:
return []
# TODO: Support dictionary keys/named tuples completion
if parsed_token.is_dict:
return []
# We are completing a value, in this case, we need to get the last
# meaninful piece of the token `x=[Tr` => `Tr`
return [
Completion(
text=str(choice),
start_position=-len(parsed_token.last_value),
)
for choice in arg.choices
if str(choice)
.lower()
.startswith(parsed_token.last_value.lower())
]
# We are completing arguments, or positionals.
# TODO: We would like to only show positional choices if we exhaust all
# required arguments. This will make it easier for the user to figure
# that there are still required named arguments. After that point we
# will show optional arguments and positionals as possible completions
ret = [
Completion(
text=arg_meta.name + "=",
start_position=-len(last_token),
display_meta=self._get_arg_help(arg_meta),
)
for arg_meta in args_meta
if arg_meta.name not in parsed_keys
]
return ret
def _filter_arguments_by_prefix(self, prefix: str, arguments=None):
arguments = arguments or self.meta.arguments.values()
if prefix:
return [
arg_meta
for arg_meta in arguments
if arg_meta.name.startswith(prefix)
]
return arguments
def _prepare_value_completions(self, prefix, partial_result):
parsed_keys = map(lambda x: x[0], partial_result.get("kv", []))
argument, rest = prefix.split("=", 1)
arguments = self._filter_arguments_by_prefix(argument)
if len(arguments) < 1:
return []
if len(arguments) == 1:
argument_obj = self._find_argument_by_name(argument)
assert argument_obj
# was that argument used before?
if argument in parsed_keys:
logging.debug(
"Argument {} was used already, not generating "
"completions".format(argument)
)
return []
return []
def _find_argument_by_name(self, name):
args_meta = self.meta.arguments.values()
filtered = filter(lambda arg: arg.name == name, args_meta)
return next(filtered, None)
def _get_arg_help(self, arg_meta):
sb = ["["]
if arg_meta.type:
sb.append(function_to_str(arg_meta.type, False, False))
sb.append(", ")
if arg_meta.default_value_set:
sb.append("default: ")
sb.append(arg_meta.default_value)
else:
sb.append("required")
sb.append("] ")
sb.append(
arg_meta.description
if arg_meta.description
else "<no description provided>"
)
return "".join(str(item) for item in sb)
|
[
"[email protected]"
] | |
970e75fa5bf67b2442c0053f46b9b75a1bd89e12
|
add74ecbd87c711f1e10898f87ffd31bb39cc5d6
|
/xcp2k/__init__.py
|
bd9846f799c29d00ccefc844e142c82e2631c10c
|
[] |
no_license
|
superstar54/xcp2k
|
82071e29613ccf58fc14e684154bb9392d00458b
|
e8afae2ccb4b777ddd3731fe99f451b56d416a83
|
refs/heads/master
| 2021-11-11T21:17:30.292500 | 2021-11-06T06:31:20 | 2021-11-06T06:31:20 | 62,589,715 | 8 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 28 |
py
|
from xcp2k.cp2k import CP2K
|
[
"[email protected]"
] | |
8cd41ee8c833fb7d76ec5d6fcc4ef5a36db55050
|
a5a7c59b04a1a64fe34653c7970c3cf173f9c1df
|
/io/swig/io/gnuplot_export.py
|
a8504306a3b7c530dfb745634aee77975fbd973d
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
siconos/siconos
|
a7afdba41a2bc1192ad8dcd93ac7266fa281f4cf
|
82a8d1338bfc1be0d36b5e8a9f40c1ad5384a641
|
refs/heads/master
| 2023-08-21T22:22:55.625941 | 2023-07-17T13:07:32 | 2023-07-17T13:07:32 | 37,709,357 | 166 | 33 |
Apache-2.0
| 2023-07-17T12:31:16 | 2015-06-19T07:55:53 |
C
|
UTF-8
|
Python
| false | false | 3,345 |
py
|
import os,sys
import h5py
import numpy
filename = '{0}.hdf5'.format(os.path.splitext(os.path.basename(sys.argv[1]))[0])
withPlot = False
print(filename)
out = h5py.File(filename, 'r')
def group(h, name):
try:
return h[name]
except KeyError:
return h.create_group(name)
def data(h, name, nbcolumns):
try:
return h[name]
except KeyError:
return h.create_dataset(name, (0, nbcolumns),
maxshape=(None, nbcolumns))
_data = group(out, 'data')
ref = group(_data, 'ref')
joints = group(_data, 'joints')
static_data = data(_data, 'static', 9)
velocities_data = data(_data, 'velocities', 8)
dynamic_data = data(_data, 'dynamic', 9)
cf_data = data(_data, 'cf', 15)
solv_data = data(_data, 'solv', 4)
input = group(_data, 'input')
nslaws = group(_data, 'nslaws')
dpos_data = dynamic_data
max_time = max(dpos_data[:, 0])
times = list(set(dpos_data[:, 0]))
times.sort()
ndyna = len(numpy.where(dpos_data[:, 0] == times[0])[0])
ntime=len(times)
print('time range :', times[0], times[-1])
print('ndyna :', ndyna)
print('ntime:', ntime)
instances = set(dpos_data[:, 1])
#output_dict = {}
#output_dict[1]= [1,2,3]
######## position output ########
nvalue = ndyna*7+1
position_output = numpy.empty((ntime,nvalue))
#print('position_output shape', numpy.shape(position_output))
position_output[:,0] = times[:]
for t in range(len(times)):
    for i in range(ndyna):
        position_output[t, 1+i*7:1+(i+1)*7] = dpos_data[t*ndyna + i, 2:9]
#print('position_output', position_output)
filename_output = '{0}_position.dat'.format(os.path.splitext(os.path.basename(sys.argv[1]))[0])
print('output file:', filename_output)
numpy.savetxt(filename_output, position_output)
######## velocity output ########
nvalue = ndyna*6+1
velocity_output = numpy.empty((ntime,nvalue))
#print('velocity_output shape', numpy.shape(velocity_output))
velocity_output[:,0] = times[:]
for t in range(len(times)):
    for i in range(ndyna):
        velocity_output[t, 1+i*6:1+(i+1)*6] = velocities_data[t*ndyna + i, 2:8]
#print('velocity_output', velocity_output)
filename_output = '{0}_velocity.dat'.format(os.path.splitext(os.path.basename(sys.argv[1]))[0])
print('output file:', filename_output)
numpy.savetxt(filename_output, velocity_output)
if withPlot:
import matplotlib
havedisplay = "DISPLAY" in os.environ
if not havedisplay:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.subplot(411)
plt.title('position x')
plt.plot(position_output[:, 0], position_output[:, 1])
plt.subplot(412)
plt.title('position y')
plt.plot(position_output[:, 0], position_output[:, 2])
plt.subplot(413)
plt.title('position z ')
plt.plot(position_output[:, 0], position_output[:, 3])
plt.figure()
plt.subplot(411)
plt.title('orientation q0')
plt.plot(position_output[:, 0], position_output[:, 4])
plt.subplot(412)
plt.title('orientation q1')
plt.plot(position_output[:, 0], position_output[:, 5])
plt.subplot(413)
plt.title('orientation q2 ')
plt.plot(position_output[:, 0], position_output[:, 6])
plt.subplot(414)
plt.title('orientation q3 ')
plt.plot(position_output[:, 0], position_output[:, 7])
if havedisplay:
plt.show()
else:
plt.savefig("bbts.png")
|
[
"[email protected]"
] | |
0ed2f6c7c8ca1dc78db9c05e4e5ca005bb389f3d
|
76a61fa52ab282501992ac889665bce01f2cdd62
|
/examples/REINFORCE/linear.py
|
7fb7f97d19853cd61fb1e43f6ee1644fbdf43297
|
[
"Apache-2.0"
] |
permissive
|
diogo149/treeano
|
35ae0f9d0c0bbcb9ca1ff8856ba527e2d19b6194
|
9b3fd6bb5eb2f6738c9e5c357e70bef95dcae7b7
|
refs/heads/master
| 2020-04-06T07:05:19.946985 | 2016-08-11T15:47:58 | 2016-08-11T15:47:58 | 34,579,507 | 45 | 13 | null | 2016-02-03T07:32:45 | 2015-04-25T17:58:17 |
Python
|
UTF-8
|
Python
| false | false | 2,456 |
py
|
from __future__ import division, absolute_import
from __future__ import print_function, unicode_literals
import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
from treeano.sandbox.nodes import REINFORCE
fX = theano.config.floatX
TARGET_WEIGHT = np.random.randn(10, 2).astype(fX)
TARGET_BIAS = np.random.randn(2).astype(fX)
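# Added note: RewardNode (below) scores each sampled action by its negative
# squared distance to an affine function of the state, so training should
# drive "mu" toward state.dot(TARGET_WEIGHT) + TARGET_BIAS.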
class RewardNode(treeano.NodeImpl):
input_keys = ("state", "sampled")
def compute_output(self, network, state_vw, sampled_vw):
W = T.constant(TARGET_WEIGHT)
b = T.constant(TARGET_BIAS)
target = T.dot(state_vw.variable, W) + b.dimshuffle("x", 0)
reward = -T.sqr(sampled_vw.variable - target).sum(axis=1)
network.create_vw(
"raw_reward",
variable=T.mean(reward),
shape=(),
)
baseline_reward = 100
network.create_vw(
"default",
variable=reward + baseline_reward,
shape=(state_vw.shape[0],),
tags={"output"},
)
BATCH_SIZE = 64
graph = tn.GraphNode(
"graph",
[[tn.InputNode("state", shape=(BATCH_SIZE, 10)),
tn.DenseNode("mu", num_units=2),
tn.ConstantNode("sigma", value=1.),
REINFORCE.NormalSampleNode("sampled"),
RewardNode("reward"),
REINFORCE.NormalREINFORCECostNode("REINFORCE")],
[{"from": "state", "to": "mu"},
{"from": "mu", "to": "sampled", "to_key": "mu"},
{"from": "sigma", "to": "sampled", "to_key": "sigma"},
{"from": "sampled", "to": "reward", "to_key": "sampled"},
{"from": "state", "to": "reward", "to_key": "state"},
{"from": "state", "to": "REINFORCE", "to_key": "state"},
{"from": "mu", "to": "REINFORCE", "to_key": "mu"},
{"from": "sigma", "to": "REINFORCE", "to_key": "sigma"},
{"from": "reward", "to": "REINFORCE", "to_key": "reward"},
{"from": "sampled", "to": "REINFORCE", "to_key": "sampled"},
{"from": "REINFORCE"}]]
)
network = tn.AdamNode(
"adam",
{"subtree": graph,
"cost": tn.ReferenceNode("cost", reference="REINFORCE")},
learning_rate=0.1
).network()
fn = network.function(
["state"], [("reward", "raw_reward")], include_updates=True)
errors = []
for i in range(5000):
error, = fn(np.random.randn(BATCH_SIZE, 10).astype(fX))
if i % 100 == 0:
print("Iter:", i, "Error:", error)
errors.append(error)
print("mean reward:", np.mean(errors))
|
[
"[email protected]"
] | |
7e3a3bf22bd64c53ffdb6d059ddd55e06a2f0295
|
e81722d244e8647e64f2ffb44e028a1f4c5df410
|
/prepare_data.py
|
98e49fd258646947ae8b42f4672c8a4727556cfe
|
[] |
no_license
|
bvillasen/volumeRender
|
9c16419d19e361799ef6c1a371e6236c90139b79
|
f36586fbf7775d4d39545064b5771cad86d3dfef
|
refs/heads/master
| 2021-08-30T19:43:10.127411 | 2020-10-18T02:05:21 | 2020-10-18T02:05:21 | 198,691,927 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,081 |
py
|
import os, sys
import numpy as np
import h5py as h5
currentDirectory = os.getcwd()
srcDirectory = currentDirectory + "/src/"
dataDirectory = currentDirectory + "/data_src/"
sys.path.extend([ srcDirectory, dataDirectory ] )
from tools import create_directory
from load_data_cholla_distributed import load_snapshot_data_distributed
#Load Snapshot Data
nPoints = 1024
# dataDir = '/raid/bruno/data/'
dataDir = '/data/groups/comp-astro/bruno/'
inDir = dataDir + 'cosmo_sims/{0}_hydro_50Mpc/output_files_pchw18/'.format(nPoints)
stats_dir = inDir + 'statistics/'
outDir = dataDir + 'cosmo_sims/{0}_hydro_50Mpc/snapshots_prepared/'.format(nPoints)
create_directory( outDir )
data_type = 'hydro'
# data_type = 'particles'
# Load Statistics
statistics = h5.File( stats_dir + 'stats_{0}.h5'.format(data_type), 'r')
fields = ['density']
precision = np.float32
Lbox = 5000 #kpc/h
if nPoints == 1024: proc_grid = [ 4, 2, 2]
if nPoints == 2048: proc_grid = [ 8, 8, 8]
box_size = [ Lbox, Lbox, Lbox ]
grid_size = [ nPoints, nPoints, nPoints ] #Size of the simulation grid
subgrid = [ [0, nPoints], [0, nPoints], [0, nPoints] ] #Size of the volume to load
field = 'density'
min_val = statistics[field].attrs['min_global']
max_val = statistics[field].attrs['max_global']
print( "Min: {0} Max: {1}".format(min_val, max_val ))
n_snapshot = 169
# for n_snapshot in range(170):
data = load_snapshot_data_distributed( n_snapshot, inDir, data_type, fields, subgrid, precision, proc_grid, box_size, grid_size, show_progess=True )
data_vals = data[data_type][field]
data_vals -= min_val
# Normalize Data
max_val = (max_val - min_val) / 1000
data_vals = np.clip( data_vals, a_min=None, a_max=max_val )
data_vals = np.log10(data_vals + 1) / np.log10( max_val + 1)
# Change to 256 range
data_vals = (255*(data_vals)).astype(np.uint8)
#Write to file
out_file_name = outDir + '{0}_{1}_{2}.h5'.format( data_type, field, n_snapshot )
out_file = h5.File( out_file_name, 'w')
out_file.create_dataset( field, data=data_vals )
out_file.close()
print( "Saved File: " + out_file_name )
|
[
"[email protected]"
] | |
fc9da05b724f3cc401ad8e99bf801480a47d99ec
|
187a6558f3c7cb6234164677a2bda2e73c26eaaf
|
/jdcloud_sdk/services/bgw/models/LocationSpec.py
|
69b6f58d03ef8610c8cc7e5db8003c726aa8d3d1
|
[
"Apache-2.0"
] |
permissive
|
jdcloud-api/jdcloud-sdk-python
|
4d2db584acc2620b7a866af82d21658cdd7cc227
|
3d1c50ed9117304d3b77a21babe899f939ae91cd
|
refs/heads/master
| 2023-09-04T02:51:08.335168 | 2023-08-30T12:00:25 | 2023-08-30T12:00:25 | 126,276,169 | 18 | 36 |
Apache-2.0
| 2023-09-07T06:54:49 | 2018-03-22T03:47:02 |
Python
|
UTF-8
|
Python
| false | false | 1,533 |
py
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class LocationSpec(object):
def __init__(self, locationCode=None, locationPortSpecCode=None, locationISPCode=None):
"""
:param locationCode: (Optional) 专线创建的地域编码;只在创建自助连接时生效,通过调用[describeLocations](../Location/describeLocations.md)接口获取
:param locationPortSpecCode: (Optional) 专线接入端口规格代码,在创建自助连接和托管专线时生效.通过调用[describeLocations](../Location/describeLocations.md)接口获取
:param locationISPCode: (Optional) 专线接入运营商代码,只在创建自助连接时生效.通过调用[describeLocations](../Location/describeLocations.md)接口获取
"""
self.locationCode = locationCode
self.locationPortSpecCode = locationPortSpecCode
self.locationISPCode = locationISPCode
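# Illustrative usage (added example; the field values are hypothetical):
#   spec = LocationSpec(locationCode="cn-north-1", locationPortSpecCode="1G",
#                       locationISPCode="ct")
# All three parameters are optional and default to None.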
|
[
"[email protected]"
] | |
1a95c984ef4b479eb1dafc39164ee5b439a1e1ac
|
e0045eec29aab56212c00f9293a21eb3b4b9fe53
|
/project/tests/test_project_ui.py
|
08ef944eea3a788fc496ef58c731307bf53c4486
|
[] |
no_license
|
tamam001/ALWAFI_P1
|
a3a9268081b9befc668a5f51c29ce5119434cc21
|
402ea8687c607fbcb5ba762c2020ebc4ee98e705
|
refs/heads/master
| 2020-05-18T08:16:50.583264 | 2019-04-30T14:43:46 | 2019-04-30T14:43:46 | 184,268,686 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 400 |
py
|
# Part of ALWAFI. See LICENSE file for full copyright and licensing details.
import odoo.tests
@odoo.tests.tagged('post_install', '-at_install')
class TestUi(odoo.tests.HttpCase):
def test_01_project_tour(self):
self.phantom_js("/web", "odoo.__DEBUG__.services['web_tour.tour'].run('project_tour')", "odoo.__DEBUG__.services['web_tour.tour'].tours.project_tour.ready", login="admin")
|
[
"[email protected]"
] | |
b734607bb98232aba5c08fbf7b0d204352c8348e
|
cac43e8d506ab79074ea4c5fb469f70ea7e6da81
|
/simulation/simulation_results_parser.py
|
85d557483acb2cb51db1cb71f98c18df0047687f
|
[] |
no_license
|
fubuloubu/ad-hoc-networking
|
63b14cb80c6013a84764f65b7fcef275dd7c673e
|
b63f266ab6b90c2b77182cecf2f04749a5e7fa25
|
refs/heads/master
| 2020-06-10T20:37:25.883649 | 2016-12-23T00:07:17 | 2016-12-23T00:07:17 | 75,881,214 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,694 |
py
|
#!/usr/bin/python3
import print_data_model
# Typical Results looks like:
example = '''
Statistics:
Total Messages: 64
Succesfully Received Messages: 6
Success Rate: 9.38%
Number of Retransmissions: 188
Average Latency: 0.00 [steps]
Statistics:
Total Messages: 52
Succesfully Received Messages: 4
Success Rate: 7.69%
Number of Retransmissions: 208
Average Latency: 0.00 [steps]
Statistics:
Total Messages: 53
Succesfully Received Messages: 4
Success Rate: 7.55%
Number of Retransmissions: 188
Average Latency: 0.00 [steps]
'''
# NOTE: Multiple Simulations possible...
def sanitize(resultsStr):
resultsStr = resultsStr.lstrip().rstrip()
oldLen = 0
while (len(resultsStr) != oldLen):
resultsStr = resultsStr.replace('\n\n','\n')
oldLen = len(resultsStr)
return resultsStr
import re
def extractMetrics(metricString):
metric = {}
metricString = metricString.split(': ')
metric["title"] = metricString[0]
metric["mname"] = metricString[0].lower().replace(' ','-')
match = re.search(r'([0-9.]+) *(.*)', metricString[1])
if match:
(data, units) = match.group(1,2)
metric["value"] = data
metric["units"] = 'none' if units == '' else \
units.lstrip().replace('[','').replace(']','')
else:
raise ValueError("'{}' does not parse with regex".format(metricString[1]))
return metric
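# Illustrative behaviour (added example, derived from the parsing above):
#   extractMetrics("Average Latency: 0.00 [steps]")
#   -> {"title": "Average Latency", "mname": "average-latency",
#       "value": "0.00", "units": "steps"}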
# Parse output of simulation run
class SimulationMetrics(print_data_model.MetricContainer):
def __init__(self, datastring):
# Clean data string and split by simulation run
simStats = sanitize(datastring).split('Statistics:\n')
# Remove empty entries and split by line
simStats = filter(None, simStats)
simStats = map(lambda s: s.rstrip().split('\n'), simStats)
# Parse each raw metric line into a metric object
# NOTE: Using list here because below we need to use it twice
simStats = list(map(lambda s: list(map(lambda ms: extractMetrics(ms), s)), simStats))
# Make sure metric names in each simulation line up
# e.g. there are N duplicates of every metric in list
metricNames = map(lambda s: [ m["mname"] for m in s], simStats)
def checkEqual(iterator):
iterator = iter(iterator)
try:
first = next(iterator)
except StopIteration:
return True
return all(first == rest for rest in iterator)
# Raise error if fault is found
if not checkEqual(metricNames):
raise ValueError("Simulations do not have matching metrics")
# Create lists by mapping each simulation metric
# to unique metric name using position in list
metricNames = [ m["mname"] for m in simStats[0] ]
metricTitles = [ m["title"] for m in simStats[0] ]
metricUnits = [ m["units"] for m in simStats[0] ]
metric_list = []
title_list = []
for i in range(len(simStats)):
for j in range(len(metricNames)):
metric_list.append("{1}-{0:02d}".format(i+1, metricNames[j]))
                title_list.append("Simulation {0} {1} [{2}]".
                                  format(i+1, metricTitles[j], metricUnits[j]))
# Get data list by extracting value from metrics and flattening that list
from ast import literal_eval
# NOTE: Using list here because below we need to use it twice
metricData = list(map(lambda s: [ literal_eval(m["value"]) for m in s], simStats))
data_list = [item for sublist in metricData for item in sublist]
# Create and append average metrics
# First transpose list of lists
avgMetricData = map(lambda *a: list(a), *metricData)
# Then do average by summing and dividing by number of entries
avgMetricData = map(lambda l: sum(l), avgMetricData)
avgMetricData = map(lambda s: s/float(len(simStats)), avgMetricData)
# NOTE: Using list here because below we need use subscripts
avgMetricData = list(avgMetricData)
# Finally append all average metrics to list
for i in range(len(metricNames)):
metric_list.append("avg-{0}".format(metricNames[i]))
            title_list.append("Simulation Average {0} [{1}]".
                              format(metricTitles[i], metricUnits[i]))
data_list.append(avgMetricData[i])
# Initialize container for all metrics we discovered
print_data_model.MetricContainer.__init__(self, metric_list, title_list, data_list)
# Use argparsing from base module
if __name__ == '__main__':
print_data_model.main(SimulationMetrics, example)
|
[
"[email protected]"
] | |
970052a55f375ecee9553f24eb9852ddfc9a8962
|
116acf603f5db8d626247355bf786c339ba95ea9
|
/libs/options.py
|
2cc4a75cb027d45077396d591f2b05a0f1016b80
|
[] |
no_license
|
dahunuaa/ZhihuiSMB_python3
|
0857afeec2337b44571986a9c70c26e716142ccb
|
8db2708efccd5eefa393738500e326bd7fb65c21
|
refs/heads/master
| 2021-01-25T14:32:32.201879 | 2018-03-11T05:59:10 | 2018-03-11T05:59:10 | 123,703,184 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,854 |
py
|
# -*- coding:utf-8 -*-
"""
alter by:dahu
alter on:2016-11-17
"""
import os
import logging
from tornado.options import parse_command_line, options, define
from ZhihuiSMB.libs import configlib
def get_base_config():
root_path = configlib.root_path
os.chdir(root_path+'/configs')
cfg=configlib.Config('base.icfg')
cfg.addNamespace(configlib)
os.chdir(root_path)
return cfg
def parse_config_file(path):
"""Rewrite tornado default parse_config_file.
Parses and loads the Python config file at the given path.
This version allow customize new options which are not defined before
from a configuration file.
"""
config = {}
with open(path, 'r', encoding='utf-8') as f:
code = compile(f.read(), path, 'exec')
exec(code, config, config)
# execfile(path, config, config)
for name in config:
if name in options:
options[name].set(config[name])
else:
define(name, config[name])
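# Illustrative effect (added note): a line such as ``debug = True`` in the
# config file overrides an existing tornado option named ``debug`` if one is
# already defined, and otherwise defines ``debug`` as a brand-new option.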
def parse_options():
_root = ''
_settings = os.path.join(_root, "settings.py")
# _projects_configs = [os.path.join(_root, "package2.icfg"),os.path.join(_root, "package.icfg")]
# _settings_local = os.path.join(_root, "settings_local.py")
try:
parse_config_file(_settings)
# parse_projects_config_file(_projects_configs)
logging.info("Using settings.py as default settings.")
except Exception as e:
import traceback
print(traceback.format_exc())
logging.error("No any default settings, are you sure? Exception: %s" % e)
'''
try:
parse_config_file(_settings_local)
logging.info("Override some settings with local settings.")
except Exception, e:
logging.error("No local settings. Exception: %s" % e)
'''
parse_command_line()
config = get_base_config()
|
[
"dahu yao"
] |
dahu yao
|
842b0e029a9d3e87a5e0a33a7d76de2cb72a3ccd
|
c85a6d674679780ee510b5c8c3dbcbdecc859f64
|
/swagger_client/__init__.py
|
3d74d68b52cf9561f2ad3314cd8c0b2e8674ea96
|
[] |
no_license
|
cbrowet-axway/APIM_sdk
|
d4f4a124e86a7b2e65d0ef07b54c68e95de68337
|
4f82df67ebe3dd6eae645bab8f86e72c0347ee24
|
refs/heads/master
| 2020-05-25T13:22:35.802350 | 2020-04-16T09:25:21 | 2020-04-16T09:25:21 | 187,820,389 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,955 |
py
|
# coding: utf-8
# flake8: noqa
"""
API Manager API v1.3
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.3.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import apis into sdk package
from swagger_client.api.api_discovery_api import APIDiscoveryApi
from swagger_client.api.api_manager_services_api import APIManagerServicesApi
from swagger_client.api.api_proxy_registration_api import APIProxyRegistrationApi
from swagger_client.api.api_repository_api import APIRepositoryApi
from swagger_client.api.applications_api import ApplicationsApi
from swagger_client.api.current_user_api import CurrentUserApi
from swagger_client.api.login_api import LoginApi
from swagger_client.api.metrics_api import MetricsApi
from swagger_client.api.migrate_api import MigrateApi
from swagger_client.api.o_auth_authorizations_api import OAuthAuthorizationsApi
from swagger_client.api.organizations_api import OrganizationsApi
from swagger_client.api.quotas_api import QuotasApi
from swagger_client.api.users_api import UsersApi
# import ApiClient
from swagger_client.api_client import ApiClient
from swagger_client.configuration import Configuration
# import models into sdk package
from swagger_client.models.api import API
from swagger_client.models.api_access import APIAccess
from swagger_client.models.api_definition import APIDefinition
from swagger_client.models.api_key import APIKey
from swagger_client.models.api_promotion import APIPromotion
from swagger_client.models.alert_config import AlertConfig
from swagger_client.models.application import Application
from swagger_client.models.application_request import ApplicationRequest
from swagger_client.models.authenticated_user_attributes import AuthenticatedUserAttributes
from swagger_client.models.authentication_profile import AuthenticationProfile
from swagger_client.models.authorization import Authorization
from swagger_client.models.authorization_code import AuthorizationCode
from swagger_client.models.backend_blob import BackendBlob
from swagger_client.models.backend_export import BackendExport
from swagger_client.models.backend_method_export import BackendMethodExport
from swagger_client.models.ca_cert import CACert
from swagger_client.models.cors_profile import CORSProfile
from swagger_client.models.config import Config
from swagger_client.models.custom_properties_config import CustomPropertiesConfig
from swagger_client.models.custom_property import CustomProperty
from swagger_client.models.custom_property_option import CustomPropertyOption
from swagger_client.models.custom_property_permission import CustomPropertyPermission
from swagger_client.models.discovery_api import DiscoveryAPI
from swagger_client.models.error_response import ErrorResponse
from swagger_client.models.export_options import ExportOptions
from swagger_client.models.external_client import ExternalClient
from swagger_client.models.frontend_export import FrontendExport
from swagger_client.models.grant_types import GrantTypes
from swagger_client.models.group import Group
from swagger_client.models.host import Host
from swagger_client.models.implicit import Implicit
from swagger_client.models.inbound_profiles import InboundProfiles
from swagger_client.models.lock import Lock
from swagger_client.models.login_endpoint import LoginEndpoint
from swagger_client.models.method import Method
from swagger_client.models.metric_field import MetricField
from swagger_client.models.metric_timeline import MetricTimeline
from swagger_client.models.number import Number
from swagger_client.models.o_auth_app_scope import OAuthAppScope
from swagger_client.models.o_auth_client import OAuthClient
from swagger_client.models.o_auth_protected_resource import OAuthProtectedResource
from swagger_client.models.o_auth_resource import OAuthResource
from swagger_client.models.operation import Operation
from swagger_client.models.organization import Organization
from swagger_client.models.outbound_profiles import OutboundProfiles
from swagger_client.models.param_value import ParamValue
from swagger_client.models.parameter import Parameter
from swagger_client.models.permission_dto import PermissionDTO
from swagger_client.models.portal_traffic_listener import PortalTrafficListener
from swagger_client.models.quota_api_constraint_dto import QuotaApiConstraintDTO
from swagger_client.models.quota_dto import QuotaDTO
from swagger_client.models.referenced_entity import ReferencedEntity
from swagger_client.models.registration_token import RegistrationToken
from swagger_client.models.remote_host import RemoteHost
from swagger_client.models.response_code import ResponseCode
from swagger_client.models.schema_object import SchemaObject
from swagger_client.models.scope import Scope
from swagger_client.models.security_device import SecurityDevice
from swagger_client.models.security_profile import SecurityProfile
from swagger_client.models.series import Series
from swagger_client.models.service import Service
from swagger_client.models.service_profiles import ServiceProfiles
from swagger_client.models.swagger import Swagger
from swagger_client.models.swagger_security_device import SwaggerSecurityDevice
from swagger_client.models.swagger_security_profile import SwaggerSecurityProfile
from swagger_client.models.system_config import SystemConfig
from swagger_client.models.token_endpoint import TokenEndpoint
from swagger_client.models.token_request_endpoint import TokenRequestEndpoint
from swagger_client.models.topology import Topology
from swagger_client.models.user import User
from swagger_client.models.virtualized_api import VirtualizedAPI
from swagger_client.models.virtualized_api_method import VirtualizedAPIMethod
from swagger_client.models.virtualized_method_export import VirtualizedMethodExport
|
[
"[email protected]"
] | |
2c7e1af7be7fc0e028d39a61eaecff78f3e51fbf
|
76e62ddbfdfba19c80b37e855a4df67672ef0808
|
/PINp/2015/GOLOVIN_A_I/task_6_7.py
|
f44cf765622b99a9db68592a554beeabdd89cf01
|
[
"Apache-2.0"
] |
permissive
|
stasvorosh/pythonintask
|
9d30f3cd492e89783b7221402375c1ebe4690baa
|
8169ed26510022fe0d589f4013f11749131957df
|
refs/heads/master
| 2021-01-17T16:49:32.778063 | 2016-10-10T14:08:04 | 2016-10-10T14:08:04 | 52,255,539 | 6 | 0 | null | 2016-02-22T07:33:16 | 2016-02-22T07:33:15 | null |
UTF-8
|
Python
| false | false | 812 |
py
|
# Task 6. Variant 7.
# Create a game in which the computer picks the name of one of the two
# co-founders of Google, and the player has to guess it.
# Golovin A.I.
# 02.06.2016
import random
avtori = ("Larry Page", "Sergey Mikhailovich Brin")
zagadka = random.choice(avtori)
predpologenie = input("The program has picked one of the Google founders\nYour guess: ")
if predpologenie.lower() == zagadka.lower():
    print("NIIIICE")
else:
    print("Wrong\nThe right answer is " + zagadka)
input("\n\nPress ENTER to exit")
|
[
"[email protected]"
] | |
293c0cc89b6d785d75a702398378df58540c5f14
|
51b6d2fc53d5c632fcf01319842baebf13901e84
|
/atcoder.jp/abc198/abc198_d/Main.py
|
84b6d70e1c6e06e31d599ed119a69b9ab833910f
|
[] |
no_license
|
mono-0812/procon
|
35db3b2c21eff74fbd7b52db07f249380f6834ef
|
68a4b53880a228a0164052b23d1326363efcbc20
|
refs/heads/master
| 2023-05-30T17:02:58.935074 | 2021-06-27T12:15:10 | 2021-06-27T12:15:10 | 345,896,553 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 946 |
py
|
import bisect,collections,copy,heapq,itertools,math,string,sys
def I(): return input()
def IS(): return input().split()
def II(): return int(input())
def IIS(): return map(int,input().split())
def LIIS(): return list(map(int,input().split()))
INF=float("inf")
MOD=10**9+7
##############################################################################
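# Added note: this solves a cryptarithm -- find an assignment of distinct
# digits to the letters of s1, s2, s3 (no leading zeros) such that the
# resulting numbers satisfy S1 + S2 == S3, by brute force over permutations.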
s1=I();s2=I();s3=I()
li=list(set(s1+s2+s3))
if len(li)>=11:
print("UNSOLVABLE")
exit()
t=0
for l in itertools.permutations(range(10),len(li)):
dic={}
S1=0
S2=0
S3=0
i=0
for key in li:
dic[key]=l[i]
i+=1
if dic[s1[0]]==0 or dic[s2[0]]==0 or dic[s3[0]]==0:continue
for i in range(len(s1)):
S1=S1*10+dic[s1[i]]
for i in range(len(s2)):
S2=S2*10+dic[s2[i]]
for i in range(len(s3)):
S3=S3*10+dic[s3[i]]
if S1+S2==S3:
print(S1)
print(S2)
print(S3)
exit()
print("UNSOLVABLE")
|
[
"[email protected]"
] | |
6b8ed594d7010e2c8bdc88e05eaafdead4a82e25
|
e97e727972149063b3a1e56b38961d0f2f30ed95
|
/test/test_dispositions_api.py
|
72054a85865322636021709bc99a39b437ac9845
|
[] |
no_license
|
knetikmedia/knetikcloud-python-client
|
f3a485f21c6f3e733a864194c9acf048943dece7
|
834a24415385c906732437970db105e1bc71bde4
|
refs/heads/master
| 2021-01-12T10:23:35.307479 | 2018-03-14T16:04:24 | 2018-03-14T16:04:24 | 76,418,830 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,570 |
py
|
# coding: utf-8
"""
Knetik Platform API Documentation latest
This is the spec for the Knetik API. Use this in conjunction with the documentation found at https://knetikcloud.com.
OpenAPI spec version: latest
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import knetik_cloud
from knetik_cloud.rest import ApiException
from knetik_cloud.apis.dispositions_api import DispositionsApi
class TestDispositionsApi(unittest.TestCase):
""" DispositionsApi unit test stubs """
def setUp(self):
self.api = knetik_cloud.apis.dispositions_api.DispositionsApi()
def tearDown(self):
pass
def test_add_disposition(self):
"""
Test case for add_disposition
Add a new disposition
"""
pass
def test_delete_disposition(self):
"""
Test case for delete_disposition
Delete a disposition
"""
pass
def test_get_disposition(self):
"""
Test case for get_disposition
Returns a disposition
"""
pass
def test_get_disposition_counts(self):
"""
Test case for get_disposition_counts
Returns a list of disposition counts
"""
pass
def test_get_dispositions(self):
"""
Test case for get_dispositions
Returns a page of dispositions
"""
pass
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
de65e17d5e6e9ac507ce87a8fcceec8ca660929e
|
2e5314c4a1816301508e1d9d8094a5f99c808ff0
|
/phase_model_svm.py
|
b916556b431694c261a483e85a9ff3e1405b3f64
|
[
"MIT"
] |
permissive
|
cahya-wirawan/phase-classification
|
0ad387547c2bbdce3a1fe4cea785c8f95b04619d
|
ca65442c4f2a30004a17cf79cbe54cf9c2f6925d
|
refs/heads/master
| 2022-12-14T04:58:45.215718 | 2019-01-11T12:07:29 | 2019-01-11T12:07:29 | 119,407,522 | 2 | 2 |
MIT
| 2022-12-08T00:54:07 | 2018-01-29T16:18:14 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 478 |
py
|
from sklearn import svm
from sklearn.model_selection import GridSearchCV
# define baseline model
def model_svm(layers, dropout=0.1, layer_number=None):
params_grid = [
{'C': [1, 10, 100, 1000], 'kernel': ['linear']},
{'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001], 'kernel': ['rbf']},
]
# num_round = 30 # the number of training iterations
model = GridSearchCV(svm.SVC(), params_grid, cv=5, scoring='accuracy', n_jobs=10)
return model
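# Illustrative usage (assumption: X is a feature matrix, y a label vector):
#   model = model_svm(layers=None)
#   model.fit(X, y)
#   print(model.best_params_, model.best_score_)
# The layers/dropout/layer_number arguments are unused here; they appear to
# exist only so this builder shares a signature with the other phase models.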
|
[
"[email protected]"
] | |
cf533b5c6e6480bfc4190c6806832be62525289a
|
b8fd7e01a7069a0666eb2fe21991753fd5ff7860
|
/Dynamic Programming/746. Min Cost Climbing Stairs rec.py
|
82f373d18007ee68c7f89698e1626d4bd217d94d
|
[] |
no_license
|
Jafoor/Leet-Code-Solved-Problems
|
0b6be0f3c82b1bc13c0c484782db65601cefa7b8
|
935e5679e04bf6f9c9d8a0bdf8b204923a2bc7a5
|
refs/heads/master
| 2023-07-02T13:38:59.690783 | 2021-07-19T16:20:48 | 2021-07-19T16:20:48 | 256,105,425 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 411 |
py
|
class Solution(object):
    def minCostClimbingStairs(self, cost):
        # memo[i] caches the minimum cost to reach the top starting from
        # step i (0 doubles as the "not computed yet" sentinel, which only
        # costs a re-computation when a subproblem's true answer is 0).
        memo = [0]*(len(cost)+10)
        m1 = solve(0, cost, memo)
        m2 = solve(1, cost, memo)
        return min(m1, m2)
def solve(i, cost, memo):
    # Base case: already past the last step, nothing left to pay.
    if i >= len(cost):
        return 0
    if memo[i] == 0:
        # Pay cost[i], then climb either one or two steps.
        x1 = cost[i] + solve(i+1, cost, memo)
        x2 = cost[i] + solve(i+2, cost, memo)
        memo[i] = min(x1, x2)
    return memo[i]
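# Illustrative checks (added example; classic LeetCode 746 test cases):
#   Solution().minCostClimbingStairs([10, 15, 20])                         -> 15
#   Solution().minCostClimbingStairs([1, 100, 1, 1, 1, 100, 1, 1, 100, 1]) -> 6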
|
[
"[email protected]"
] | |
18e9295e97ab81fcc39d33ebd4605505a63da9db
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/7/usersdata/74/4449/submittedfiles/esferas.py
|
19dc11065165f216271b0a42907b5e5cdbc2e5b2
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 308 |
py
|
# -*- coding: utf-8 -*-
from __future__ import division
e1 = float(input('Volume of the first sphere: '))
e2 = float(input('Volume of the second sphere: '))
e3 = float(input('Volume of the third sphere: '))
e4 = float(input('Volume of the fourth sphere: '))
a = e2+e3+e4
d = e2+e3
if a == e1 and d == e4 and e2 == e3:
    print('S')
else:
    print('N')
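# Worked example (added for clarity): with e1=4, e2=1, e3=1, e4=2 we get
# a = 1+1+2 = 4 == e1, d = 1+1 = 2 == e4 and e2 == e3, so the output is 'S'.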
|
[
"[email protected]"
] | |
99760e10fa9e33679326968433083b8d2d910f35
|
894ed667dae7e299f472a0b531ea1783ed58fd27
|
/src/Basic.py
|
162deaa3803d9e523e6464c785115c853ffb4632
|
[] |
no_license
|
satpreetsingh/OpenAgent
|
dd2a8ade47159ee6b3345b9328e068e1dc419052
|
09985fc45c0efa7fffa8a15127a0e7f48d5de30d
|
refs/heads/master
| 2021-05-03T07:45:26.230037 | 2017-04-10T12:42:46 | 2017-04-10T12:42:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 674 |
py
|
class Sub:
    def f(self, inputs):
        return inputs[0] - inputs[1]
class Add:
    def f(self, inputs):
        return inputs[0] + inputs[1]
class Mult:
    def f(self, inputs):
        return inputs[0] * inputs[1]
class Less:
    def f(self, inputs):
        return inputs[0] < inputs[1]
class Equal:
    def f(self, inputs):
        return inputs[0] == inputs[1]
class More:
    def f(self, inputs):
        return inputs[0] > inputs[1]
class Not:
    def f(self, inputs):
        return inputs[0] == 0
class Or:
    def f(self, inputs):
        return inputs[0] == 1 or inputs[1] == 1
class And:
    def f(self, inputs):
        return inputs[0] == 1 and inputs[1] == 1
class Abs:
    def f(self, inputs):
        return abs(inputs[0])
[
"[email protected]"
] | |
318093ab4b9cf1a820e3d9e389d529875df1f23e
|
097a7cb751a7c5b311b11fea60278081c751dcc1
|
/src/twisted/names/test/test_names.py
|
a1672c64f0a02f411846a0c3b27c1d58c2537a44
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"LicenseRef-scancode-public-domain"
] |
permissive
|
dchenk/twisted
|
b12449d968804ebd832d4a814e3fc858420b0e51
|
4a0ac6edba947c77ea95085e75f5b7bc7ddffb12
|
refs/heads/master
| 2021-07-24T15:59:39.735816 | 2020-08-24T23:12:15 | 2020-08-24T23:12:15 | 211,361,245 | 0 | 1 |
NOASSERTION
| 2019-10-02T18:36:33 | 2019-09-27T16:34:31 | null |
UTF-8
|
Python
| false | false | 47,789 |
py
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for twisted.names.
"""
import copy
import operator
import socket
from io import BytesIO
from functools import partial, reduce
from struct import pack
from twisted.trial import unittest
from twisted.internet import reactor, defer, error
from twisted.internet.defer import succeed
from twisted.names import client, server, common, authority, dns
from twisted.names.dns import (
SOA, Message, RRHeader, Record_A, Record_SOA, Query)
from twisted.names.error import DomainError
from twisted.names.client import Resolver
from twisted.names.secondary import (
SecondaryAuthorityService, SecondaryAuthority)
from twisted.python.compat import nativeString
from twisted.python.filepath import FilePath
from twisted.test.proto_helpers import (
StringTransport, MemoryReactorClock, waitUntilAllDisconnected)
def justPayload(results):
return [r.payload for r in results[0]]
class NoFileAuthority(authority.FileAuthority):
def __init__(self, soa, records):
# Yes, skip FileAuthority
common.ResolverBase.__init__(self)
self.soa, self.records = soa, records
soa_record = dns.Record_SOA(
mname = b'test-domain.com',
rname = u'root.test-domain.com',
serial = 100,
refresh = 1234,
minimum = 7654,
expire = 19283784,
retry = 15,
ttl=1
)
reverse_soa = dns.Record_SOA(
mname = b'93.84.28.in-addr.arpa',
rname = b'93.84.28.in-addr.arpa',
serial = 120,
refresh = 54321,
minimum = 382,
expire = 11193983,
retry = 30,
ttl=3
)
my_soa = dns.Record_SOA(
mname = u'my-domain.com',
rname = b'postmaster.test-domain.com',
serial = 130,
refresh = 12345,
minimum = 1,
expire = 999999,
retry = 100,
)
test_domain_com = NoFileAuthority(
soa = (b'test-domain.com', soa_record),
records = {
b'test-domain.com': [
soa_record,
dns.Record_A(b'127.0.0.1'),
dns.Record_NS(b'39.28.189.39'),
dns.Record_SPF(b'v=spf1 mx/30 mx:example.org/30 -all'),
dns.Record_SPF(b'v=spf1 +mx a:\0colo',
b'.example.com/28 -all not valid'),
dns.Record_MX(10, u'host.test-domain.com'),
dns.Record_HINFO(os=b'Linux', cpu=b'A Fast One, Dontcha know'),
dns.Record_CNAME(b'canonical.name.com'),
dns.Record_MB(b'mailbox.test-domain.com'),
dns.Record_MG(b'mail.group.someplace'),
dns.Record_TXT(b'A First piece of Text', b'a SecoNd piece'),
dns.Record_A6(0, b'ABCD::4321', b''),
dns.Record_A6(12, b'0:0069::0', b'some.network.tld'),
dns.Record_A6(8, b'0:5634:1294:AFCB:56AC:48EF:34C3:01FF',
b'tra.la.la.net'),
dns.Record_TXT(b'Some more text, haha! Yes. \0 Still here?'),
dns.Record_MR(b'mail.redirect.or.whatever'),
dns.Record_MINFO(rmailbx=b'r mail box', emailbx=b'e mail box'),
dns.Record_AFSDB(subtype=1, hostname=b'afsdb.test-domain.com'),
dns.Record_RP(mbox=b'whatever.i.dunno', txt=b'some.more.text'),
dns.Record_WKS(b'12.54.78.12', socket.IPPROTO_TCP,
b'\x12\x01\x16\xfe\xc1\x00\x01'),
dns.Record_NAPTR(100, 10, b"u", b"sip+E2U",
b"!^.*$!sip:[email protected]!"),
dns.Record_AAAA(b'AF43:5634:1294:AFCB:56AC:48EF:34C3:01FF')],
b'http.tcp.test-domain.com': [
dns.Record_SRV(257, 16383, 43690, b'some.other.place.fool')
],
b'host.test-domain.com': [
dns.Record_A(b'123.242.1.5'),
dns.Record_A(b'0.255.0.255'),
],
b'host-two.test-domain.com': [
#
# Python bug
# dns.Record_A('255.255.255.255'),
#
dns.Record_A(b'255.255.255.254'),
dns.Record_A(b'0.0.0.0')
],
b'cname.test-domain.com': [
dns.Record_CNAME(b'test-domain.com')
],
b'anothertest-domain.com': [
dns.Record_A(b'1.2.3.4')],
}
)
reverse_domain = NoFileAuthority(
soa = (b'93.84.28.in-addr.arpa', reverse_soa),
records = {
b'123.93.84.28.in-addr.arpa': [
dns.Record_PTR(b'test.host-reverse.lookup.com'),
reverse_soa
]
}
)
my_domain_com = NoFileAuthority(
soa = (b'my-domain.com', my_soa),
records = {
b'my-domain.com': [
my_soa,
dns.Record_A(b'1.2.3.4', ttl='1S'),
dns.Record_NS(b'ns1.domain', ttl=b'2M'),
dns.Record_NS(b'ns2.domain', ttl='3H'),
dns.Record_SRV(257, 16383, 43690, b'some.other.place.fool',
ttl='4D')
]
}
)
class ServerDNSTests(unittest.TestCase):
"""
Test cases for DNS server and client.
"""
def setUp(self):
self.factory = server.DNSServerFactory([
test_domain_com, reverse_domain, my_domain_com
], verbose=2)
p = dns.DNSDatagramProtocol(self.factory)
while 1:
listenerTCP = reactor.listenTCP(0, self.factory, interface="127.0.0.1")
# It's simpler to do the stop listening with addCleanup,
# even though we might not end up using this TCP port in
# the test (if the listenUDP below fails). Cleaning up
# this TCP port sooner than "cleanup time" would mean
# adding more code to keep track of the Deferred returned
# by stopListening.
self.addCleanup(listenerTCP.stopListening)
port = listenerTCP.getHost().port
try:
listenerUDP = reactor.listenUDP(port, p, interface="127.0.0.1")
except error.CannotListenError:
pass
else:
self.addCleanup(listenerUDP.stopListening)
break
self.listenerTCP = listenerTCP
self.listenerUDP = listenerUDP
self.resolver = client.Resolver(servers=[('127.0.0.1', port)])
def tearDown(self):
"""
Clean up any server connections associated with the
L{DNSServerFactory} created in L{setUp}
"""
# It'd be great if DNSServerFactory had a method that
# encapsulated this task. At least the necessary data is
# available, though.
for conn in self.factory.connections[:]:
conn.transport.loseConnection()
return waitUntilAllDisconnected(reactor, self.factory.connections[:])
def namesTest(self, querying, expectedRecords):
"""
Assert that the DNS response C{querying} will eventually fire with
contains exactly a certain collection of records.
@param querying: A L{Deferred} returned from one of the DNS client
I{lookup} methods.
@param expectedRecords: A L{list} of L{IRecord} providers which must be
in the response or the test will be failed.
@return: A L{Deferred} that fires when the assertion has been made. It
fires with a success result if the assertion succeeds and with a
L{Failure} if it fails.
"""
def checkResults(response):
receivedRecords = justPayload(response)
self.assertEqual(set(expectedRecords), set(receivedRecords))
querying.addCallback(checkResults)
return querying
def test_addressRecord1(self):
"""Test simple DNS 'A' record queries"""
return self.namesTest(
self.resolver.lookupAddress('test-domain.com'),
[dns.Record_A('127.0.0.1', ttl=19283784)]
)
def test_addressRecord2(self):
"""Test DNS 'A' record queries with multiple answers"""
return self.namesTest(
self.resolver.lookupAddress('host.test-domain.com'),
[dns.Record_A('123.242.1.5', ttl=19283784),
dns.Record_A('0.255.0.255', ttl=19283784)]
)
def test_addressRecord3(self):
"""Test DNS 'A' record queries with edge cases"""
return self.namesTest(
self.resolver.lookupAddress('host-two.test-domain.com'),
[dns.Record_A('255.255.255.254', ttl=19283784), dns.Record_A('0.0.0.0', ttl=19283784)]
)
def test_authority(self):
"""Test DNS 'SOA' record queries"""
return self.namesTest(
self.resolver.lookupAuthority('test-domain.com'),
[soa_record]
)
def test_mailExchangeRecord(self):
"""
The DNS client can issue an MX query and receive a response including
an MX record as well as any A record hints.
"""
return self.namesTest(
self.resolver.lookupMailExchange(b"test-domain.com"),
[dns.Record_MX(10, b"host.test-domain.com", ttl=19283784),
dns.Record_A(b"123.242.1.5", ttl=19283784),
dns.Record_A(b"0.255.0.255", ttl=19283784)])
def test_nameserver(self):
"""Test DNS 'NS' record queries"""
return self.namesTest(
self.resolver.lookupNameservers('test-domain.com'),
[dns.Record_NS('39.28.189.39', ttl=19283784)]
)
def test_HINFO(self):
"""Test DNS 'HINFO' record queries"""
return self.namesTest(
self.resolver.lookupHostInfo('test-domain.com'),
[dns.Record_HINFO(os=b'Linux', cpu=b'A Fast One, Dontcha know',
ttl=19283784)]
)
def test_PTR(self):
"""Test DNS 'PTR' record queries"""
return self.namesTest(
self.resolver.lookupPointer('123.93.84.28.in-addr.arpa'),
[dns.Record_PTR('test.host-reverse.lookup.com', ttl=11193983)]
)
def test_CNAME(self):
"""Test DNS 'CNAME' record queries"""
return self.namesTest(
self.resolver.lookupCanonicalName('test-domain.com'),
[dns.Record_CNAME('canonical.name.com', ttl=19283784)]
)
def test_MB(self):
"""Test DNS 'MB' record queries"""
return self.namesTest(
self.resolver.lookupMailBox('test-domain.com'),
[dns.Record_MB('mailbox.test-domain.com', ttl=19283784)]
)
def test_MG(self):
"""Test DNS 'MG' record queries"""
return self.namesTest(
self.resolver.lookupMailGroup('test-domain.com'),
[dns.Record_MG('mail.group.someplace', ttl=19283784)]
)
def test_MR(self):
"""Test DNS 'MR' record queries"""
return self.namesTest(
self.resolver.lookupMailRename('test-domain.com'),
[dns.Record_MR('mail.redirect.or.whatever', ttl=19283784)]
)
def test_MINFO(self):
"""Test DNS 'MINFO' record queries"""
return self.namesTest(
self.resolver.lookupMailboxInfo('test-domain.com'),
[dns.Record_MINFO(rmailbx='r mail box', emailbx='e mail box', ttl=19283784)]
)
def test_SRV(self):
"""Test DNS 'SRV' record queries"""
return self.namesTest(
self.resolver.lookupService('http.tcp.test-domain.com'),
[dns.Record_SRV(257, 16383, 43690, 'some.other.place.fool', ttl=19283784)]
)
def test_AFSDB(self):
"""Test DNS 'AFSDB' record queries"""
return self.namesTest(
self.resolver.lookupAFSDatabase('test-domain.com'),
[dns.Record_AFSDB(subtype=1, hostname='afsdb.test-domain.com', ttl=19283784)]
)
def test_RP(self):
"""Test DNS 'RP' record queries"""
return self.namesTest(
self.resolver.lookupResponsibility('test-domain.com'),
[dns.Record_RP(mbox='whatever.i.dunno', txt='some.more.text', ttl=19283784)]
)
def test_TXT(self):
"""Test DNS 'TXT' record queries"""
return self.namesTest(
self.resolver.lookupText('test-domain.com'),
[dns.Record_TXT(b'A First piece of Text', b'a SecoNd piece',
ttl=19283784),
dns.Record_TXT(b'Some more text, haha! Yes. \0 Still here?',
ttl=19283784)]
)
def test_spf(self):
"""
L{DNSServerFactory} can serve I{SPF} resource records.
"""
return self.namesTest(
self.resolver.lookupSenderPolicy('test-domain.com'),
[dns.Record_SPF(b'v=spf1 mx/30 mx:example.org/30 -all',
ttl=19283784),
dns.Record_SPF(b'v=spf1 +mx a:\0colo',
b'.example.com/28 -all not valid', ttl=19283784)]
)
def test_WKS(self):
"""Test DNS 'WKS' record queries"""
return self.namesTest(
self.resolver.lookupWellKnownServices('test-domain.com'),
[dns.Record_WKS('12.54.78.12', socket.IPPROTO_TCP,
b'\x12\x01\x16\xfe\xc1\x00\x01', ttl=19283784)]
)
def test_someRecordsWithTTLs(self):
result_soa = copy.copy(my_soa)
result_soa.ttl = my_soa.expire
return self.namesTest(
self.resolver.lookupAllRecords('my-domain.com'),
[result_soa,
dns.Record_A('1.2.3.4', ttl='1S'),
dns.Record_NS('ns1.domain', ttl='2M'),
dns.Record_NS('ns2.domain', ttl='3H'),
dns.Record_SRV(257, 16383, 43690, 'some.other.place.fool', ttl='4D')]
)
def test_AAAA(self):
"""Test DNS 'AAAA' record queries (IPv6)"""
return self.namesTest(
self.resolver.lookupIPV6Address('test-domain.com'),
[dns.Record_AAAA('AF43:5634:1294:AFCB:56AC:48EF:34C3:01FF', ttl=19283784)]
)
def test_A6(self):
"""Test DNS 'A6' record queries (IPv6)"""
return self.namesTest(
self.resolver.lookupAddress6('test-domain.com'),
[dns.Record_A6(0, 'ABCD::4321', '', ttl=19283784),
dns.Record_A6(12, '0:0069::0', 'some.network.tld', ttl=19283784),
dns.Record_A6(8, '0:5634:1294:AFCB:56AC:48EF:34C3:01FF', 'tra.la.la.net', ttl=19283784)]
)
def test_zoneTransfer(self):
"""
Test DNS 'AXFR' queries (Zone transfer)
"""
default_ttl = soa_record.expire
results = [copy.copy(r) for r in reduce(operator.add, test_domain_com.records.values())]
for r in results:
if r.ttl is None:
r.ttl = default_ttl
return self.namesTest(
self.resolver.lookupZone('test-domain.com').addCallback(lambda r: (r[0][:-1],)),
results
)
def test_zoneTransferConnectionFails(self):
"""
A failed AXFR TCP connection errbacks the L{Deferred} returned
from L{Resolver.lookupZone}.
"""
resolver = Resolver(servers=[("nameserver.invalid", 53)])
return self.assertFailure(resolver.lookupZone("impossible.invalid"),
error.DNSLookupError)
def test_similarZonesDontInterfere(self):
"""Tests that unrelated zones don't mess with each other."""
return self.namesTest(
self.resolver.lookupAddress("anothertest-domain.com"),
[dns.Record_A('1.2.3.4', ttl=19283784)]
)
def test_NAPTR(self):
"""
Test DNS 'NAPTR' record queries.
"""
return self.namesTest(
self.resolver.lookupNamingAuthorityPointer('test-domain.com'),
[dns.Record_NAPTR(100, 10, b"u", b"sip+E2U",
b"!^.*$!sip:[email protected]!",
ttl=19283784)])
class HelperTests(unittest.TestCase):
def test_serialGenerator(self):
f = self.mktemp()
a = authority.getSerial(f)
for i in range(20):
b = authority.getSerial(f)
self.assertTrue(a < b)
a = b
class AXFRTests(unittest.TestCase):
def setUp(self):
self.results = None
self.d = defer.Deferred()
self.d.addCallback(self._gotResults)
self.controller = client.AXFRController('fooby.com', self.d)
self.soa = dns.RRHeader(name='fooby.com', type=dns.SOA, cls=dns.IN, ttl=86400, auth=False,
payload=dns.Record_SOA(mname='fooby.com',
rname='hooj.fooby.com',
serial=100,
refresh=200,
retry=300,
expire=400,
minimum=500,
ttl=600))
self.records = [
self.soa,
dns.RRHeader(name='fooby.com', type=dns.NS, cls=dns.IN, ttl=700, auth=False,
payload=dns.Record_NS(name='ns.twistedmatrix.com', ttl=700)),
dns.RRHeader(name='fooby.com', type=dns.MX, cls=dns.IN, ttl=700, auth=False,
payload=dns.Record_MX(preference=10, exchange='mail.mv3d.com', ttl=700)),
dns.RRHeader(name='fooby.com', type=dns.A, cls=dns.IN, ttl=700, auth=False,
payload=dns.Record_A(address='64.123.27.105', ttl=700)),
self.soa
]
def _makeMessage(self):
# hooray they all have the same message format
return dns.Message(id=999, answer=1, opCode=0, recDes=0, recAv=1, auth=1, rCode=0, trunc=0, maxSize=0)
def test_bindAndTNamesStyle(self):
# Bind style = One big single message
m = self._makeMessage()
m.queries = [dns.Query('fooby.com', dns.AXFR, dns.IN)]
m.answers = self.records
self.controller.messageReceived(m, None)
self.assertEqual(self.results, self.records)
def _gotResults(self, result):
self.results = result
def test_DJBStyle(self):
# DJB style = message per record
records = self.records[:]
while records:
m = self._makeMessage()
m.queries = [] # DJB *doesn't* specify any queries.. hmm..
m.answers = [records.pop(0)]
self.controller.messageReceived(m, None)
self.assertEqual(self.results, self.records)
class ResolvConfHandlingTests(unittest.TestCase):
def test_missing(self):
resolvConf = self.mktemp()
r = client.Resolver(resolv=resolvConf)
self.assertEqual(r.dynServers, [('127.0.0.1', 53)])
r._parseCall.cancel()
def test_empty(self):
resolvConf = self.mktemp()
open(resolvConf, 'w').close()
r = client.Resolver(resolv=resolvConf)
self.assertEqual(r.dynServers, [('127.0.0.1', 53)])
r._parseCall.cancel()
class AuthorityTests(unittest.TestCase):
"""
Tests for the basic response record selection code in L{FileAuthority}
(independent of its fileness).
"""
def test_domainErrorForNameWithCommonSuffix(self):
"""
L{FileAuthority} lookup methods errback with L{DomainError} if
the requested C{name} shares a common suffix with its zone but
is not actually a descendant of its zone, in terms of its
sequence of DNS name labels. eg www.the-example.com has
nothing to do with the zone example.com.
"""
testDomain = test_domain_com
testDomainName = b'nonexistent.prefix-' + testDomain.soa[0]
f = self.failureResultOf(testDomain.lookupAddress(testDomainName))
self.assertIsInstance(f.value, DomainError)
def test_recordMissing(self):
"""
If a L{FileAuthority} has a zone which includes an I{NS} record for a
particular name and that authority is asked for another record for the
same name which does not exist, the I{NS} record is not included in the
authority section of the response.
"""
authority = NoFileAuthority(
soa=(soa_record.mname.name, soa_record),
records={
soa_record.mname.name: [
soa_record,
dns.Record_NS('1.2.3.4'),
]})
answer, authority, additional = self.successResultOf(
authority.lookupAddress(soa_record.mname.name))
self.assertEqual(answer, [])
self.assertEqual(
authority, [
dns.RRHeader(
soa_record.mname.name, soa_record.TYPE,
ttl=soa_record.expire, payload=soa_record,
auth=True)])
self.assertEqual(additional, [])
def test_unknownTypeNXDOMAIN(self):
"""
Requesting a record of unknown type where no records exist for the name
in question results in L{DomainError}.
"""
testDomain = test_domain_com
testDomainName = b'nonexistent.prefix-' + testDomain.soa[0]
unknownType = max(common.typeToMethod) + 1
f = self.failureResultOf(
testDomain.query(Query(name=testDomainName, type=unknownType)))
self.assertIsInstance(f.value, DomainError)
def test_unknownTypeMissing(self):
"""
Requesting a record of unknown type where other records exist for the
name in question results in an empty answer set.
"""
unknownType = max(common.typeToMethod) + 1
answer, authority, additional = self.successResultOf(
my_domain_com.query(
Query(name=u'my-domain.com', type=unknownType)))
self.assertEqual(answer, [])
def _referralTest(self, method):
"""
Create an authority and make a request against it. Then verify that the
result is a referral, including no records in the answers or additional
sections, but with an I{NS} record in the authority section.
"""
subdomain = b'example.' + soa_record.mname.name
nameserver = dns.Record_NS('1.2.3.4')
authority = NoFileAuthority(
soa=(soa_record.mname.name, soa_record),
records={
subdomain: [
nameserver,
]})
d = getattr(authority, method)(subdomain)
answer, authority, additional = self.successResultOf(d)
self.assertEqual(answer, [])
self.assertEqual(
authority, [dns.RRHeader(
subdomain, dns.NS, ttl=soa_record.expire,
payload=nameserver, auth=False)])
self.assertEqual(additional, [])
def test_referral(self):
"""
When an I{NS} record is found for a child zone, it is included in the
authority section of the response. It is marked as non-authoritative if
the authority is not also authoritative for the child zone (RFC 2181,
section 6.1).
"""
self._referralTest('lookupAddress')
def test_allRecordsReferral(self):
"""
A referral is also generated for a request of type C{ALL_RECORDS}.
"""
self._referralTest('lookupAllRecords')
class AdditionalProcessingTests(unittest.TestCase):
"""
Tests for L{FileAuthority}'s additional processing for those record types
which require it (MX, CNAME, etc).
"""
_A = dns.Record_A(b"10.0.0.1")
_AAAA = dns.Record_AAAA(b"f080::1")
def _lookupSomeRecords(self, method, soa, makeRecord, target, addresses):
"""
Perform a DNS lookup against a L{FileAuthority} configured with records
as defined by C{makeRecord} and C{addresses}.
@param method: The name of the lookup method to use; for example,
C{"lookupNameservers"}.
@type method: L{str}
@param soa: A L{Record_SOA} for the zone for which the L{FileAuthority}
is authoritative.
@param makeRecord: A one-argument callable which accepts a name and
returns an L{IRecord} provider. L{FileAuthority} is constructed
with this record. The L{FileAuthority} is queried for a record of
the resulting type with the given name.
@param target: The extra name which the record returned by
C{makeRecord} will be pointed at; this is the name which might
require extra processing by the server so that all the available,
useful information is returned. For example, this is the target of
a CNAME record or the mail exchange host pointed to by an MX record.
@type target: L{bytes}
@param addresses: A L{list} of records giving addresses of C{target}.
@return: A L{Deferred} that fires with the result of the resolver
method give by C{method}.
"""
authority = NoFileAuthority(
soa=(soa.mname.name, soa),
records={
soa.mname.name: [makeRecord(target)],
target: addresses,
},
)
return getattr(authority, method)(soa_record.mname.name)
def assertRecordsMatch(self, expected, computed):
"""
Assert that the L{RRHeader} instances given by C{expected} and
C{computed} carry all the same information but without requiring the
records appear in the same order.
@param expected: A L{list} of L{RRHeader} instances giving the expected
records.
@param computed: A L{list} of L{RRHeader} instances giving the records
computed by the scenario under test.
@raise self.failureException: If the two collections of records
disagree.
"""
# RRHeader instances aren't inherently ordered. Impose an ordering
# that's good enough for the purposes of these tests - in which we
# never have more than one record of a particular type.
key = lambda rr: rr.type
self.assertEqual(sorted(expected, key=key), sorted(computed, key=key))
def _additionalTest(self, method, makeRecord, addresses):
"""
Verify that certain address records are included in the I{additional}
section of a response generated by L{FileAuthority}.
@param method: See L{_lookupSomeRecords}
@param makeRecord: See L{_lookupSomeRecords}
@param addresses: A L{list} of L{IRecord} providers which the
I{additional} section of the response is required to match
(ignoring order).
@raise self.failureException: If the I{additional} section of the
response consists of different records than those given by
C{addresses}.
"""
target = b"mail." + soa_record.mname.name
d = self._lookupSomeRecords(
method, soa_record, makeRecord, target, addresses)
answer, authority, additional = self.successResultOf(d)
self.assertRecordsMatch(
[dns.RRHeader(
target, address.TYPE, ttl=soa_record.expire, payload=address,
auth=True)
for address in addresses],
additional)
def _additionalMXTest(self, addresses):
"""
Verify that a response to an MX query has certain records in the
I{additional} section.
@param addresses: See C{_additionalTest}
"""
self._additionalTest(
"lookupMailExchange", partial(dns.Record_MX, 10), addresses)
def test_mailExchangeAdditionalA(self):
"""
If the name of the MX response has A records, they are included in the
additional section of the response.
"""
self._additionalMXTest([self._A])
def test_mailExchangeAdditionalAAAA(self):
"""
If the name of the MX response has AAAA records, they are included in
the additional section of the response.
"""
self._additionalMXTest([self._AAAA])
def test_mailExchangeAdditionalBoth(self):
"""
If the name of the MX response has both A and AAAA records, they are
all included in the additional section of the response.
"""
self._additionalMXTest([self._A, self._AAAA])
def _additionalNSTest(self, addresses):
"""
Verify that a response to an NS query has certain records in the
I{additional} section.
@param addresses: See C{_additionalTest}
"""
self._additionalTest(
"lookupNameservers", dns.Record_NS, addresses)
def test_nameserverAdditionalA(self):
"""
If the name of the NS response has A records, they are included in the
additional section of the response.
"""
self._additionalNSTest([self._A])
def test_nameserverAdditionalAAAA(self):
"""
If the name of the NS response has AAAA records, they are included in
the additional section of the response.
"""
self._additionalNSTest([self._AAAA])
def test_nameserverAdditionalBoth(self):
"""
If the name of the NS response has both A and AAAA records, they are
all included in the additional section of the response.
"""
self._additionalNSTest([self._A, self._AAAA])
def _answerCNAMETest(self, addresses):
"""
Verify that a response to a CNAME query has certain records in the
I{answer} section.
@param addresses: See C{_additionalTest}
"""
target = b"www." + soa_record.mname.name
d = self._lookupSomeRecords(
"lookupCanonicalName", soa_record, dns.Record_CNAME, target,
addresses)
answer, authority, additional = self.successResultOf(d)
alias = dns.RRHeader(
soa_record.mname.name, dns.CNAME, ttl=soa_record.expire,
payload=dns.Record_CNAME(target), auth=True)
self.assertRecordsMatch(
[dns.RRHeader(
target, address.TYPE, ttl=soa_record.expire, payload=address,
auth=True)
for address in addresses] + [alias],
answer)
def test_canonicalNameAnswerA(self):
"""
If the name of the CNAME response has A records, they are included in
the answer section of the response.
"""
self._answerCNAMETest([self._A])
def test_canonicalNameAnswerAAAA(self):
"""
If the name of the CNAME response has AAAA records, they are included
in the answer section of the response.
"""
self._answerCNAMETest([self._AAAA])
def test_canonicalNameAnswerBoth(self):
"""
If the name of the CNAME response has both A and AAAA records, they are
all included in the answer section of the response.
"""
self._answerCNAMETest([self._A, self._AAAA])
class NoInitialResponseTests(unittest.TestCase):
def test_noAnswer(self):
"""
If a request returns a L{dns.NS} response, but we can't connect to the
given server, the request fails with the error returned at connection.
"""
def query(self, *args):
# Pop from the message list, so that it blows up if more queries
# are run than expected.
return succeed(messages.pop(0))
def queryProtocol(self, *args, **kwargs):
return defer.fail(socket.gaierror("Couldn't connect"))
resolver = Resolver(servers=[('0.0.0.0', 0)])
resolver._query = query
messages = []
# Let's patch dns.DNSDatagramProtocol.query, as there is no easy way to
# customize it.
self.patch(dns.DNSDatagramProtocol, "query", queryProtocol)
records = [
dns.RRHeader(name='fooba.com', type=dns.NS, cls=dns.IN, ttl=700,
auth=False,
payload=dns.Record_NS(name='ns.twistedmatrix.com',
ttl=700))]
m = dns.Message(id=999, answer=1, opCode=0, recDes=0, recAv=1, auth=1,
rCode=0, trunc=0, maxSize=0)
m.answers = records
messages.append(m)
return self.assertFailure(
resolver.getHostByName("fooby.com"), socket.gaierror)
class SecondaryAuthorityServiceTests(unittest.TestCase):
"""
Tests for L{SecondaryAuthorityService}, a service which keeps one or more
authorities up to date by doing zone transfers from a master.
"""
def test_constructAuthorityFromHost(self):
"""
L{SecondaryAuthorityService} can be constructed with a C{str} giving a
master server address and several domains, causing the creation of a
secondary authority for each domain and that master server address and
the default DNS port.
"""
primary = '192.168.1.2'
service = SecondaryAuthorityService(
primary, [b'example.com', 'example.org'])
self.assertEqual(service.primary, primary)
self.assertEqual(service._port, 53)
self.assertEqual(service.domains[0].primary, primary)
self.assertEqual(service.domains[0]._port, 53)
self.assertEqual(service.domains[0].domain, b'example.com')
self.assertEqual(service.domains[1].primary, primary)
self.assertEqual(service.domains[1]._port, 53)
self.assertEqual(service.domains[1].domain, b'example.org')
def test_constructAuthorityFromHostAndPort(self):
"""
L{SecondaryAuthorityService.fromServerAddressAndDomains} constructs a
new L{SecondaryAuthorityService} from a C{str} giving a master server
address and DNS port and several domains, causing the creation of a secondary
authority for each domain and that master server address and the given
DNS port.
"""
primary = '192.168.1.3'
port = 5335
service = SecondaryAuthorityService.fromServerAddressAndDomains(
(primary, port), ['example.net', b'example.edu'])
self.assertEqual(service.primary, primary)
self.assertEqual(service._port, 5335)
self.assertEqual(service.domains[0].primary, primary)
self.assertEqual(service.domains[0]._port, port)
self.assertEqual(service.domains[0].domain, b'example.net')
self.assertEqual(service.domains[1].primary, primary)
self.assertEqual(service.domains[1]._port, port)
self.assertEqual(service.domains[1].domain, b'example.edu')
def test_constructAuthorityFromBytes(self):
"""
L{SecondaryAuthorityService.fromServerAddressAndDomains} constructs a
new L{SecondaryAuthorityService} from a C{bytes} giving a master server
address and several domains, causing the creation of a secondary
authority for each domain and that master server address and the given
DNS port.
"""
primary = '192.168.1.3'
service = SecondaryAuthorityService(
primary.encode(),
[b'example.net', 'example.edu'], # Coerced to bytes.
)
self.assertEqual(service.primary, primary)
self.assertEqual(service.domains[0].primary, primary)
self.assertEqual(service.domains[0].domain, b'example.net')
self.assertEqual(service.domains[1].primary, primary)
self.assertEqual(service.domains[1].domain, b'example.edu')
class SecondaryAuthorityTests(unittest.TestCase):
"""
L{twisted.names.secondary.SecondaryAuthority} correctly constructs objects
with a specified IP address and optionally specified DNS port.
"""
def test_defaultPort(self):
"""
When constructed using L{SecondaryAuthority.__init__}, the default port
of 53 is used.
"""
secondary = SecondaryAuthority('192.168.1.1', 'inside.com')
self.assertEqual(secondary.primary, '192.168.1.1')
self.assertEqual(secondary._port, 53)
self.assertEqual(secondary.domain, b'inside.com')
def test_explicitPort(self):
"""
When constructed using L{SecondaryAuthority.fromServerAddressAndDomain},
the specified port is used.
"""
secondary = SecondaryAuthority.fromServerAddressAndDomain(
('192.168.1.1', 5353), 'inside.com')
self.assertEqual(secondary.primary, '192.168.1.1')
self.assertEqual(secondary._port, 5353)
self.assertEqual(secondary.domain, b'inside.com')
def test_transfer(self):
"""
An attempt is made to transfer the zone for the domain the
L{SecondaryAuthority} was constructed with from the server address it
was constructed with when L{SecondaryAuthority.transfer} is called.
"""
secondary = SecondaryAuthority.fromServerAddressAndDomain(
('192.168.1.2', 1234), 'example.com')
secondary._reactor = reactor = MemoryReactorClock()
secondary.transfer()
# Verify a connection attempt to the server address above
host, port, factory, timeout, bindAddress = reactor.tcpClients.pop(0)
self.assertEqual(host, '192.168.1.2')
self.assertEqual(port, 1234)
# See if a zone transfer query is issued.
proto = factory.buildProtocol((host, port))
transport = StringTransport()
proto.makeConnection(transport)
msg = Message()
# DNSProtocol.writeMessage length encodes the message by prepending a
# 2 byte message length to the buffered value.
msg.decode(BytesIO(transport.value()[2:]))
self.assertEqual(
[dns.Query('example.com', dns.AXFR, dns.IN)], msg.queries)
def test_lookupAddress(self):
"""
L{SecondaryAuthority.lookupAddress} returns a L{Deferred} that fires
with the I{A} records the authority has cached from the primary.
"""
secondary = SecondaryAuthority.fromServerAddressAndDomain(
('192.168.1.2', 1234), b'example.com')
secondary._reactor = reactor = MemoryReactorClock()
secondary.transfer()
host, port, factory, timeout, bindAddress = reactor.tcpClients.pop(0)
proto = factory.buildProtocol((host, port))
transport = StringTransport()
proto.makeConnection(transport)
query = Message(answer=1, auth=1)
query.decode(BytesIO(transport.value()[2:]))
# Generate a response with some data we can check.
soa = Record_SOA(
mname=b'ns1.example.com',
rname='admin.example.com',
serial=123456,
refresh=3600,
minimum=4800,
expire=7200,
retry=9600,
ttl=12000,
)
a = Record_A(b'192.168.1.2', ttl=0)
answer = Message(id=query.id, answer=1, auth=1)
answer.answers.extend([
RRHeader(b'example.com', type=SOA, payload=soa),
RRHeader(b'example.com', payload=a),
RRHeader(b'example.com', type=SOA, payload=soa),
])
data = answer.toStr()
proto.dataReceived(pack('!H', len(data)) + data)
result = self.successResultOf(secondary.lookupAddress('example.com'))
self.assertEqual((
[RRHeader(b'example.com', payload=a, auth=True)], [], []), result)
sampleBindZone = b"""\
$ORIGIN example.com.
$TTL 1w
example.com. IN SOA dns.example.com (
2013120201 ; serial number of this zone file
1d ; slave refresh
2h ; slave retry time in case of a problem
4w ; slave expiration time
1h ; maximum caching time in case of failed lookups
)
; A comment.
@ IN AAAA 2001:db8:10::1
example.com. IN A 10.0.0.1
no-in.example.com. A 10.0.0.2 ; technically wrong but used to work
not-fqdn IN MX 10 mx.example.com
www IN CNAME example.com"""
class BindAuthorityTests(unittest.TestCase):
"""
Tests for L{twisted.names.authority.BindAuthority}.
"""
def loadBindString(self, s):
"""
Create a new L{twisted.names.authority.BindAuthority} from C{s}.
@param s: A string with BIND zone data.
@type s: bytes
@return: a new bind authority
@rtype: L{twisted.names.authority.BindAuthority}
"""
fp = FilePath(self.mktemp().encode("ascii"))
fp.setContent(s)
return authority.BindAuthority(fp.path)
def setUp(self):
self.auth = self.loadBindString(sampleBindZone)
def test_ttl(self):
"""
Loads the default $TTL and applies it to all records.
"""
for dom in self.auth.records.keys():
for rec in self.auth.records[dom]:
self.assertTrue(
604800 == rec.ttl
)
def test_originFromFile(self):
"""
Loads the default $ORIGIN.
"""
self.assertEqual(
b"example.com.", self.auth.origin,
)
self.assertIn(
b"not-fqdn.example.com", self.auth.records,
)
def test_aRecords(self):
"""
A records are loaded.
"""
for dom, ip in [(b"example.com", u"10.0.0.1"),
(b"no-in.example.com", u"10.0.0.2")]:
[[rr], [], []] = self.successResultOf(
self.auth.lookupAddress(dom)
)
self.assertEqual(
dns.Record_A(
ip,
604800,
),
rr.payload,
)
def test_aaaaRecords(self):
"""
AAAA records are loaded.
"""
[[rr], [], []] = self.successResultOf(
self.auth.lookupIPV6Address(b"example.com")
)
self.assertEqual(
dns.Record_AAAA(
u"2001:db8:10::1",
604800,
),
rr.payload,
)
def test_mxRecords(self):
"""
MX records are loaded.
"""
[[rr], [], []] = self.successResultOf(
self.auth.lookupMailExchange(b"not-fqdn.example.com")
)
self.assertEqual(
dns.Record_MX(
preference=10, name="mx.example.com", ttl=604800,
),
rr.payload,
)
def test_cnameRecords(self):
"""
CNAME records are loaded.
"""
[answers, [], []] = self.successResultOf(
self.auth.lookupIPV6Address(b"www.example.com")
)
rr = answers[0]
self.assertEqual(
dns.Record_CNAME(
name="example.com", ttl=604800,
),
rr.payload,
)
def test_invalidRecordClass(self):
"""
loadBindString raises NotImplementedError on invalid records.
"""
with self.assertRaises(NotImplementedError) as e:
self.loadBindString(
b"example.com. IN LOL 192.168.0.1"
)
self.assertEqual(
"Record type 'LOL' not supported", e.exception.args[0]
)
def test_invalidDirectives(self):
"""
$INCLUDE and $GENERATE raise NotImplementedError.
"""
for directive in (b"$INCLUDE", b"$GENERATE"):
with self.assertRaises(NotImplementedError) as e:
self.loadBindString(directive + b" doesNotMatter")
self.assertEqual(
nativeString(directive + b" directive not implemented"),
e.exception.args[0]
)
samplePySource = """\
zone = [
SOA(
# For whom we are the authority
'example.com',
# This nameserver's name
mname = "dns.example.com",
# Mailbox of individual who handles this
rname = "root.example.com",
# Unique serial identifying this SOA data
serial = 86400,
# Time interval before zone should be refreshed
refresh = "2H",
# Interval before failed refresh should be retried
retry = "1H",
# Upper limit on time interval before expiry
expire = "1H",
# Minimum TTL
minimum = "3H"
),
AAAA('example.com', '2001:db8:10::1'),
A('example.com', '10.0.0.1'),
NS('example.com', 'dns.example.com'),
A('no-in.example.com', '10.0.0.2'),
PTR('2.0.0.10.in-addr.arpa', 'no-in.example.com'),
CNAME('www.example.com', 'example.com'),
CNAME('ftp.example.com', 'example.com'),
MX('not-fqdn.example.com', 10, 'mail.example.com'),
]
"""
class PySourceAuthorityTests(unittest.TestCase):
"""
Tests for L{twisted.names.authority.PySourceAuthority}.
"""
def loadPySourceString(self, s):
"""
Create a new L{twisted.names.authority.PySourceAuthority} from C{s}.
@param s: A string with BIND zone data in a Python source file.
@type s: L{str}
@return: a new bind authority
@rtype: L{twisted.names.authority.PySourceAuthority}
"""
fp = FilePath(self.mktemp())
with open(fp.path, "w") as f:
f.write(s)
return authority.PySourceAuthority(fp.path)
def setUp(self):
self.auth = self.loadPySourceString(samplePySource)
def test_aRecords(self):
"""
A records are loaded.
"""
for dom, ip in [(b"example.com", u"10.0.0.1"),
(b"no-in.example.com", u"10.0.0.2")]:
[[rr], [], []] = self.successResultOf(
self.auth.lookupAddress(dom)
)
self.assertEqual(
dns.Record_A(
ip
),
rr.payload,
)
def test_aaaaRecords(self):
"""
AAAA records are loaded.
"""
[[rr], [], []] = self.successResultOf(
self.auth.lookupIPV6Address(b"example.com")
)
self.assertEqual(
dns.Record_AAAA(
u"2001:db8:10::1"
),
rr.payload,
)
def test_mxRecords(self):
"""
MX records are loaded.
"""
[[rr], [], []] = self.successResultOf(
self.auth.lookupMailExchange(b"not-fqdn.example.com")
)
self.assertEqual(
dns.Record_MX(
preference=10, name="mail.example.com",
),
rr.payload,
)
def test_cnameRecords(self):
"""
CNAME records are loaded.
"""
[answers, [], []] = self.successResultOf(
self.auth.lookupIPV6Address(b"www.example.com")
)
rr = answers[0]
self.assertEqual(
dns.Record_CNAME(
name="example.com",
),
rr.payload,
)
def test_PTR(self):
"""
PTR records are loaded.
"""
[answers, [], []] = self.successResultOf(
self.auth.lookupPointer(b"2.0.0.10.in-addr.arpa")
)
rr = answers[0]
self.assertEqual(
dns.Record_PTR(
name=b"no-in.example.com",
),
rr.payload,
)
def test_badInputNoZone(self):
"""
Input file has no zone variable
"""
badPySource = "nothing = []"
self.assertRaises(ValueError, self.loadPySourceString, badPySource)
|
[
"[email protected]"
] | |
63ddf4b52d4ca7de34fe3edee0bee60935ab4325
|
a73b1f7876cadf0d9bc0c2c3c68400b2007bff4d
|
/bookmarks/settings.py
|
ab07f451146cb84b1a36762b0453006937f90105
|
[] |
no_license
|
mrk24251/social-networking
|
31c717ace60413086056f396cc786bcb5cef8747
|
0f8e0c9ea390dbd84df2b1daa1b95f05e58deb1b
|
refs/heads/master
| 2022-12-11T05:56:00.652044 | 2021-06-18T08:36:12 | 2021-06-18T08:36:12 | 249,624,656 | 0 | 0 | null | 2022-12-08T04:26:35 | 2020-03-24T05:53:17 |
CSS
|
UTF-8
|
Python
| false | false | 5,105 |
py
|
"""
Django settings for bookmarks project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
from django.urls import reverse_lazy
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')@)w-21#*$lr!@pl-2a2*^ha&3rgn7-#-)0msg$_k05t$3@a3l'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'account.apps.AccountConfig',
'django.contrib.admin',
'annoying',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
"sslserver",
'django.contrib.messages',
'cloudinary_storage',
'django.contrib.staticfiles',
'social_django',
'images.apps.ImagesConfig',
'actions.apps.ActionsConfig',
'sorl.thumbnail',
'django.contrib.postgres',
'cloudinary',
]
MIDDLEWARE = [
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bookmarks.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
SETTINGS_PATH = os.path.dirname(os.path.dirname(__file__))
TEMPLATE_DIRS = (
os.path.join(SETTINGS_PATH, 'templates'),
)
WSGI_APPLICATION = 'bookmarks.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'd4qe3u5blhrbam',
'USER': 'exausuvjcqmvse',
'PASSWORD': '712ff4460c544145b4cabc9b6cc78822eacba4b0670e2b660a173b0be8839e2e',
'HOST': 'ec2-52-200-82-50.compute-1.amazonaws.com',
'PORT': '5432'
}
}
ADMINS = (
('Mohammadreza Karami', '[email protected]'),
)
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
ABSOLUTE_URL_OVERRIDES = {
'auth.user': lambda u: reverse_lazy('user_detail',
args=[u.username])
}
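# With this override in place, calling get_absolute_url() on a User
# instance resolves the 'user_detail' URL pattern with the username as
# its argument.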
THUMBNAIL_DEBUG = True
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'account.authentication.EmailAuthBackend',
'social_core.backends.google.GoogleOAuth2',
]
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = '915019433080-sn5o3ue35inhvpgfoq572r7ufgaigka0.apps.googleusercontent.com' # Google Consumer Key
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = 'ySLD3I7esB-SjOJaQzqtat_Q' # Google Consumer Secret
REDIS_HOST = 'ec2-54-197-124-167.compute-1.amazonaws.com'
REDIS_PORT = 25580
REDIS_PASSWORD = 'pa1f0a5e4291cc48d7081c8a5195ab2ece84789299ebc80e35fe49c3df8cb99b2'
REDIS_USER = 'h'
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Tehran'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
LOGIN_REDIRECT_URL = 'dashboard'
LOGIN_URL = 'login'
LOGOUT_URL = 'logout'
CLOUDINARY_STORAGE = {
'CLOUD_NAME': 'dt0x3ff8y',
'API_KEY': '842463339847471',
'API_SECRET': 'd4CUuUKhO4JSVfy9DA41a4KhGGw',
}
DEFAULT_FILE_STORAGE = 'cloudinary_storage.storage.MediaCloudinaryStorage'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
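# The console backend writes outgoing mail (e.g. password-reset messages)
# to stdout instead of sending it, which is useful in development; swap in
# an SMTP backend for production.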
|
[
"[email protected]"
] | |
32409fe7d932b0f82a86871030a3e70d8e6e1acc
|
8f6cc0e8bd15067f1d9161a4b178383e62377bc7
|
/evolutionary_algorithms/playground/fusion_V1001/procedure.py
|
fde6017dbb421b32ef689007b42182c302416ab6
|
[] |
no_license
|
humorbeing/python_github
|
9c4dfc61a3cefbb266fefff335f6b28d05797e5e
|
e4b4b49bee7e7e3843c6874717779ce8d619bd02
|
refs/heads/master
| 2023-01-22T21:51:20.193131 | 2020-01-26T21:47:23 | 2020-01-26T21:47:23 | 163,707,778 | 0 | 0 | null | 2022-12-27T15:37:48 | 2019-01-01T01:58:18 |
Python
|
UTF-8
|
Python
| false | false | 1,332 |
py
|
import numpy as np
from crossover import *
from mutation import *
from tools import *
def objective_function(point_in):
x_in = point_in[0]
y_in = point_in[1]
return (((x_in + 50) ** 2 + (y_in + 15) ** 2) / 4000) - (np.cos(x_in / 4) * np.cos(y_in / 4)) + 1
def initialize(mu_in, boundary_in):
x_new_generation = np.random.uniform(size=mu_in)
x_new_generation = x_new_generation * (boundary_in[1] - boundary_in[0]) + boundary_in[0]
y_new_generation = np.random.uniform(size=mu_in)
y_new_generation = y_new_generation * (boundary_in[3] - boundary_in[2]) + boundary_in[2]
new_gen = np.array([x_new_generation, y_new_generation])
return new_gen.T
def operate(gen_in, mu_in, lamb_da_in, boundary_in):
lambda_gen = crossover_UNDX(gen_in, mu_in, lamb_da_in)
lambda_gen = mutation_normal(lambda_gen)
return reflect_fix(lambda_gen, boundary_in)
def nominate(gen_in, lambda_gen_in):
cand = np.concatenate((gen_in, lambda_gen_in))
return cand
def evaluate(cand_in):
fit = []
for i in cand_in:
f = objective_function(i)
fit.append(f)
return np.array(fit)
def select(cand_in, fit_in, mu_in):
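    # keep the mu_in candidates with the largest fitness values:
    # argpartition places the top mu_in entries in the last mu_in slots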
ind = np.argpartition(fit_in, -1 * mu_in)[-1 * mu_in:]
new_gen = []
for i in ind:
new_gen.append(cand_in[i])
return np.array(new_gen)
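# Putting the pieces together, a minimal (mu + lambda)-style loop sketch
# using the functions above; the mu/lambda/boundary values are illustrative:
#
#   boundary = [-60, 60, -60, 60]          # [x_min, x_max, y_min, y_max]
#   gen = initialize(30, boundary)
#   for _ in range(100):
#       lambda_gen = operate(gen, 30, 200, boundary)
#       cand = nominate(gen, lambda_gen)
#       gen = select(cand, evaluate(cand), 30)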
|
[
"[email protected]"
] | |
07f5b6e825f41b3d2981885837b11dd11464e4c4
|
5cf9fb9362559a69a3feb2e572c1089fbfd9dc24
|
/setup.py
|
11bf68236d6f20392396504839453e5c5e3c99f7
|
[
"MIT"
] |
permissive
|
akb89/nonce2vec
|
5f42943271a0054caa645d91c75e0f9cf6eacefe
|
23d3852904eb337d7ca24ea519463ee9ffa50fa5
|
refs/heads/master
| 2021-06-21T23:17:42.035144 | 2019-07-29T11:53:25 | 2019-07-29T11:53:25 | 129,858,554 | 4 | 1 |
MIT
| 2019-07-29T11:53:26 | 2018-04-17T06:42:39 |
Python
|
UTF-8
|
Python
| false | false | 2,006 |
py
|
#!/usr/bin/env python3
"""nonce2vec setup.py.
This file details modalities for packaging the nonce2vec application.
"""
from setuptools import setup
with open('README.md', 'r') as fh:
long_description = fh.read()
setup(
name='nonce2vec',
description='A python module to generate word embeddings from tiny data',
    author='Alexandre Kabbach and Aurélie Herbelot',
author_email='[email protected]',
long_description=long_description,
long_description_content_type='text/markdown',
version='2.0.0',
url='https://github.com/minimalparts/nonce2vec',
download_url='https://github.com/minimalparts/nonce2vec/#files',
license='MIT',
keywords=['word2vec', 'word-embeddings', 'incremental-learning'],
platforms=['any'],
packages=['nonce2vec', 'nonce2vec.utils', 'nonce2vec.models',
'nonce2vec.exceptions', 'nonce2vec.logging',
'nonce2vec.resources'],
package_data={'nonce2vec': ['logging/*.yml', 'resources/*']},
include_package_data=True,
entry_points={
'console_scripts': [
'n2v = nonce2vec.main:main'
],
},
install_requires=['pyyaml>=4.2b1', 'gensim==3.4.0', 'numpy==1.15.4',
'scipy==1.2.0'],
classifiers=['Development Status :: 2 - Pre-Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Text Processing :: Linguistic'],
zip_safe=False,
)
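# After installing the package (e.g. `pip install .`), the console script
# declared in entry_points above becomes available on the PATH:
#   $ n2v --help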
|
[
"[email protected]"
] | |
da7cf50f5c8f9940b3e8f242219bd6e283dc4926
|
9433617418eab26490bb75e73cdd47317cbb935a
|
/python/script.py
|
b26f778319248cc21da82ba34ba0c56b0bc366bd
|
[] |
no_license
|
cdufour/tsrs21-scripting
|
3033d8413a8ca313986e612b73ef3e7b931c61b8
|
50b821176d7346001f49854791b6d47c090833c8
|
refs/heads/master
| 2022-04-18T10:16:47.048124 | 2020-04-20T07:50:53 | 2020-04-20T07:50:53 | 255,838,020 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 696 |
py
|
# python script
# firewall is a variable of type str
firewall = "192.168.0.17"
nom = "Chris" # type str
age = 120 # type int
tva = 5.5 # type float
contaminé = False # type bool
# Display
print(nom) # prints the contents of the variable
print("Formation Scripting") # prints the string literal
# Reading user input
# saisie = input() # example: blabla
# Careful: the input function always returns a str;
# the value must be converted to an int if you want to
# do arithmetic with the entered value
saisie = int(input()) # convert the entered string to an int
print("Valeur saisie: ", saisie) # display the entered value
print(type(saisie))
|
[
"[email protected]"
] | |
c6c6ddfa1a690619e1526367e462d7a01825013f
|
165478aa697ba81d7adc1dc3b081fc97ffafb723
|
/scripts/bdt_looper/xgboost/python/others/plot_prune.py
|
b9640c0234a648de143dd8419517f6f39fdcb3e6
|
[] |
no_license
|
zhangzc11/WVZLooper
|
d55c24127a65a36bd4a0ac25a8c53c007b5a71a1
|
4b2eb46392c228672d5a2db30539b49aeb58cd1c
|
refs/heads/readBDTNtuple
| 2020-05-20T16:28:18.838367 | 2019-09-27T19:19:57 | 2019-09-27T19:19:57 | 185,660,975 | 0 | 0 | null | 2019-05-08T18:39:19 | 2019-05-08T18:39:19 | null |
UTF-8
|
Python
| false | false | 681 |
py
|
import numpy as np
import matplotlib.pyplot as plt
import os
test_name = 'xgb_wwz_vs_ttz_nbAll_full'
plotDir = "/home/users/zhicaiz/public_html/WWZ/BDT/"
name = []
AUC = []
with open("result_prune_ttZ.txt") as f:
lines = f.readlines()
for line in lines:
line_items = line.strip('\n').split()
name.append(line_items[0])
AUC.append(float(line_items[1]))
plt.figure()
plt.plot(name, AUC, lw=2)
plt.xticks(rotation=90)
plt.xlabel('cumulative removed features (left to right)')
plt.ylabel('AUC after removal')
plt.savefig(plotDir+'training/AUC_vs_removed_features_'+test_name+'.png', bbox_inches='tight')
os.system("chmod 755 "+plotDir+"training/*")
|
[
"[email protected]"
] | |
1fb6a3c298864d61c8bcb5c823457898adc4494b
|
3e917645a0e1375189c8ee8c1e93ed15348111ef
|
/projects/synthesis/intensification/archive/intensification_w_napplication/intensification_n_application.py
|
7c3748d0774424a4b983cf49852706984812818c
|
[] |
no_license
|
mbougie/gibbs
|
d4544e688ce2b63530535e1f5102328aece30e0d
|
39d5dc0866fc0dd149d0cf1f22bfd20911a9d29e
|
refs/heads/master
| 2021-01-12T06:59:27.214123 | 2020-01-07T15:48:12 | 2020-01-07T15:48:12 | 83,906,717 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,081 |
py
|
import arcpy, os
from arcpy import env
from arcpy.sa import *
import glob
arcpy.CheckOutExtension("Spatial")
arcpy.env.overwriteOutput = True
env.workspace = 'D:\\projects\\usxp\\deliverables\\maps\\synthesis\\intensification\\eric\\n_application\\n_application.gdb'
scene_list = list(range(1, 6))
years_list = list(range(2007, 2011))
# years_list = [2007]
print years_list
for scene in scene_list:
print 'scene', scene
processed_list = []
print 'processed_list', processed_list
for year in years_list:
print 'year', year
raster_list = glob.glob('D:\\projects\\usxp\\deliverables\\maps\\synthesis\\intensification\\eric\\n_application\\Scen{}\\*_{}.tif'.format(str(scene), str(year)))
print 'raster_list', raster_list
# Execute CellStatistics
processed_list.append(CellStatistics(raster_list, "SUM", "DATA"))
raster_list = None
raster_mean = CellStatistics(processed_list, "MEAN", "DATA")
del processed_list[:]
# Save the output
raster_mean.save("Napplication2007_2016mean_Scen{}".format(str(scene)))
raster_mean = None
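# Net effect per scenario: SUM the rasters within each year (2007-2010,
# since range(2007, 2011) excludes the upper bound), then take the
# cell-wise MEAN of those yearly sums before saving.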
|
[
"[email protected]"
] | |
6abf0934b78d45b1ea202bae662d5f97493e2d1b
|
63e3df8ef8de1de01cf8f2befc1cc1449416deba
|
/ARE/old/ARE_transposeCnn_linearLayer.py
|
ea4f1d78b1e42673a6cf107f471ba912dda10ebd
|
[] |
no_license
|
volpato30/R-D
|
b0ef1d34ee84f0a82c80840dfdaa53675de887ed
|
df44b26e9e832a150722d36b528eaa20b2a2ab9b
|
refs/heads/master
| 2020-05-29T14:41:17.037872 | 2016-11-04T03:34:53 | 2016-11-04T03:34:53 | 62,595,977 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,377 |
py
|
from __future__ import print_function
import os, sys, urllib, gzip
try:
import cPickle as pickle
except:
import pickle
sys.setrecursionlimit(10000)
import numpy as np
import lasagne
from lasagne.layers import Conv2DLayer, TransposedConv2DLayer, ReshapeLayer, DenseLayer, InputLayer
from lasagne.layers import get_output, Upscale2DLayer
from lasagne.nonlinearities import rectify, leaky_rectify, tanh
from lasagne.updates import nesterov_momentum
from lasagne.regularization import regularize_network_params,regularize_layer_params, l2, l1
import theano
import theano.tensor as T
import time
import matplotlib
import matplotlib.pyplot as plt
LABEL = sys.argv[1] if len(sys.argv) > 1 else '0'
ENCODE_SIZE = int(sys.argv[2]) if len(sys.argv) > 2 else 64
WEIGHT_FILE_NAME = './weights/ARE_transposeConv_linearLayer_BindW_encode_size{}'.format(ENCODE_SIZE)+'.npz'
with np.load('./data/lena_data.npz') as f:
data = [f['arr_%d' % i] for i in range(len(f.files))]
X_forward, X_forward_out, X_backward, X_backward_out = data
# X_forward shape : (100,40,1,72,72)
def get_layer_by_name(net, name):
for i, layer in enumerate(lasagne.layers.get_all_layers(net)):
if layer.name == name:
return layer, i
return None, None
def build_ARE(input_var=None, encode_size = 64):
l_in = InputLayer(shape=(None, X_forward.shape[2], X_forward.shape[3], X_forward.shape[4]),input_var=input_var)
conv1 = Conv2DLayer(l_in, 16, 6, stride=2, W=lasagne.init.Orthogonal('relu'), pad=0)
conv2 = Conv2DLayer(conv1, 32, 6, stride = 2, W=lasagne.init.Orthogonal('relu'), pad = 0)
conv3 = Conv2DLayer(conv2, 64, 5, stride = 2, W=lasagne.init.Orthogonal('relu'), pad = 0)
conv4 = Conv2DLayer(conv3, 128, 4, stride = 2, W=lasagne.init.Orthogonal('relu'), pad = 0)
reshape1 = ReshapeLayer(conv4, shape =(([0], -1)))
mid_size = np.prod(conv4.output_shape[1:])
encode_layer = DenseLayer(reshape1, name= 'encode', num_units= encode_size, W=lasagne.init.Orthogonal('relu'),\
nonlinearity=lasagne.nonlinearities.rectify)
action_layer = DenseLayer(encode_layer, name= 'action', num_units= encode_size, W=lasagne.init.Orthogonal(1.0),\
nonlinearity=None)
mid_layer = DenseLayer(action_layer, num_units = mid_size, W=lasagne.init.Orthogonal('relu'), nonlinearity=lasagne.nonlinearities.rectify)
reshape2 = ReshapeLayer(mid_layer, shape =(([0], conv4.output_shape[1], conv4.output_shape[2], conv4.output_shape[3])))
deconv1 = TransposedConv2DLayer(reshape2, conv4.input_shape[1],
conv4.filter_size, stride=conv4.stride, crop=0,
W=conv4.W, flip_filters=not conv4.flip_filters)
deconv2 = TransposedConv2DLayer(deconv1, conv3.input_shape[1],
conv3.filter_size, stride=conv3.stride, crop=0,
W=conv3.W, flip_filters=not conv3.flip_filters)
deconv3 = TransposedConv2DLayer(deconv2, conv2.input_shape[1],
conv2.filter_size, stride=conv2.stride, crop=0,
W=conv2.W, flip_filters=not conv2.flip_filters)
deconv4 = TransposedConv2DLayer(deconv3, conv1.input_shape[1],
conv1.filter_size, stride=conv1.stride, crop=0,
W=conv1.W, flip_filters=not conv1.flip_filters)
reshape3 = ReshapeLayer(deconv4, shape =(([0], -1)))
return reshape3
#
class ARE(object):
def __init__(self, lambda1 = 0, lambda2 = 0):
self.input_var = T.tensor4('inputs')
self.target_var = T.matrix('targets')
self.are_net = build_ARE(self.input_var, ENCODE_SIZE)
self.reconstructed = lasagne.layers.get_output(self.are_net)
self.encode_layer, _ = get_layer_by_name(self.are_net, 'encode')
self.action_layer, _ = get_layer_by_name(self.are_net, 'action')
self.encoded_feature = lasagne.layers.get_output(self.encode_layer)
self.transformed_feature = lasagne.layers.get_output(self.action_layer)
self.l1_penalty = regularize_network_params(self.are_net, l1)
self.loss = lasagne.objectives.squared_error(self.reconstructed, self.target_var)
self.XXT = T.dot(self.encoded_feature, self.encoded_feature.transpose()) + T.dot(self.transformed_feature, self.transformed_feature.transpose())
        self.loss = self.loss.mean() + lambda1 * self.l1_penalty + lambda2 * self.XXT.trace()
self.params = lasagne.layers.get_all_params(self.are_net, trainable=True)
self.l_r = theano.shared(np.array(0.01, dtype=theano.config.floatX))
self.updates = lasagne.updates.nesterov_momentum(
self.loss, self.params, learning_rate=self.l_r, momentum=0.90)
self.train_fn = theano.function([self.input_var, self.target_var], self.loss, updates=self.updates,on_unused_input='warn')
self.best_err = 999
self.action1_w = np.eye(ENCODE_SIZE, dtype = np.float32)
self.action1_b = np.zeros(ENCODE_SIZE, dtype = np.float32)
self.action2_w = np.eye(ENCODE_SIZE, dtype = np.float32)
self.action2_b = np.zeros(ENCODE_SIZE, dtype = np.float32)
# self.action3_w = np.eye(ENCODE_SIZE, dtype = np.float32)
# self.action3_b = np.zeros(ENCODE_SIZE, dtype = np.float32)
# self.action4_w = np.eye(ENCODE_SIZE, dtype = np.float32)
# self.action4_b = np.zeros(ENCODE_SIZE, dtype = np.float32)
def load_pretrained_model(self, file_name=WEIGHT_FILE_NAME):
with np.load(file_name) as f:
param_values = [f['arr_%d' % i] for i in range(len(f.files))]
lasagne.layers.set_all_param_values(self.are_net, param_values)
def set_action_layer(self, action_id):
if action_id == 1:
self.action_layer.W.set_value(self.action1_w)
self.action_layer.b.set_value(self.action1_b)
elif action_id == 2:
self.action_layer.W.set_value(self.action2_w)
self.action_layer.b.set_value(self.action2_b)
elif action_id == 3:
self.action_layer.W.set_value(self.action3_w)
self.action_layer.b.set_value(self.action3_b)
elif action_id == 4:
self.action_layer.W.set_value(self.action4_w)
self.action_layer.b.set_value(self.action4_b)
else:
raise Exception('not a valid action')
def get_action_layer(self, action_id):
if action_id == 1:
self.action1_w = self.action_layer.W.get_value()
self.action1_b = self.action_layer.b.get_value()
elif action_id == 2:
self.action2_w = self.action_layer.W.get_value()
self.action2_b = self.action_layer.b.get_value()
elif action_id == 3:
self.action3_w = self.action_layer.W.get_value()
self.action3_b = self.action_layer.b.get_value()
elif action_id == 4:
self.action4_w = self.action_layer.W.get_value()
self.action4_b = self.action_layer.b.get_value()
else:
raise Exception('not a valid action')
def reset_loss(self, lambda1 = 0, lambda2 = 0):
self.loss = lasagne.objectives.squared_error(self.reconstructed, self.target_var)
self.loss = self.loss.mean() + lambda1 * self.l1_penalty + lambda2 * self.XXT.trace()
def train_ARE_network(self, num_epochs=50, verbose = True, save_model = False):
if verbose:
print("Starting training...")
for epoch in range(num_epochs):
start_time = time.time()
train_err = 0
self.set_action_layer(1)
for i in range(X_forward.shape[0]):
train_err1 = self.train_fn(X_forward[i], X_forward_out[i])
train_err += (train_err1)
self.get_action_layer(1)
self.set_action_layer(2)
for i in range(X_forward.shape[0]):
train_err2 = self.train_fn(X_backward[i], X_backward_out[i])
train_err += (train_err2)
self.get_action_layer(2)
train_err = train_err/float(2 * X_forward.shape[0])
if verbose:
print("Epoch {} of {} took {:.3f}s".format(
epoch + 1, num_epochs, time.time() - start_time))
print("training loss:\t\t{:.6f}".format(float(train_err)))
if save_model:
if train_err < self.best_err:
self.best_err = train_err
print('save best model which has train_err: {:.7f}'.format(self.best_err))
np.savez(WEIGHT_FILE_NAME, *lasagne.layers.get_all_param_values(self.are_net))
# main part
lena_are = ARE()
lena_are.l_r.set_value(0.1)
lena_are.train_ARE_network(num_epochs=10, verbose = True, save_model = True)
lena_are.load_pretrained_model()
lena_are.l_r.set_value(0.05)
lena_are.train_ARE_network(num_epochs=100, verbose = True, save_model = True)
lena_are.load_pretrained_model()
lena_are.l_r.set_value(0.01)
lena_are.train_ARE_network(num_epochs=500, verbose = True, save_model = True)
lena_are.load_pretrained_model()
lena_are.l_r.set_value(0.005)
lena_are.train_ARE_network(num_epochs=500, verbose = True, save_model = True)
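# The schedule above anneals the learning rate (0.1 -> 0.005), reloading the
# best saved weights between stages so each stage resumes from the lowest
# training error seen so far.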
|
[
"[email protected]"
] | |
f240c30cb5172ea541bcbfd8c0f9809f51eb3e65
|
e35fd52fe4367320024a26f2ee357755b5d5f4bd
|
/leetcode/problems/993.cousins-in-binary-tree.py
|
02241b00093f60a537071aa39a894753f03a8bcc
|
[] |
no_license
|
liseyko/CtCI
|
a451967b0a0ce108c491d30b81e88d20ad84d2cd
|
c27f19fac14b4acef8c631ad5569e1a5c29e9e1f
|
refs/heads/master
| 2020-03-21T14:28:47.621481 | 2019-11-12T22:59:07 | 2019-11-12T22:59:07 | 138,658,372 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,397 |
py
|
#
# @lc app=leetcode id=993 lang=python3
#
# [993] Cousins in Binary Tree
#
# https://leetcode.com/problems/cousins-in-binary-tree/description/
#
# algorithms
# Easy (51.80%)
# Total Accepted: 31.2K
# Total Submissions: 60.2K
# Testcase Example: '[1,2,3,4]\n4\n3'
#
# In a binary tree, the root node is at depth 0, and children of each depth k
# node are at depth k+1.
#
# Two nodes of a binary tree are cousins if they have the same depth, but have
# different parents.
#
# We are given the root of a binary tree with unique values, and the values x
# and y of two different nodes in the tree.
#
# Return true if and only if the nodes corresponding to the values x and y are
# cousins.
#
# Example 1:
#
# Input: root = [1,2,3,4], x = 4, y = 3
# Output: false
#
# Example 2:
#
# Input: root = [1,2,3,null,4,null,5], x = 5, y = 4
# Output: true
#
# Example 3:
#
# Input: root = [1,2,3,null,4], x = 2, y = 3
# Output: false
#
# Note:
#
# The number of nodes in the tree will be between 2 and 100.
# Each node has a unique integer value from 1 to 100.
#
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def isCousins(self, root: TreeNode, x: int, y: int) -> bool:
        # BFS over (parent, node) pairs one level at a time: x and y are
        # cousins iff they appear on the same level under different parents.
        level = [(None, root)]
        while level:
            found = {n.val: p for p, n in level if n.val in (x, y)}
            if len(found) == 2:
                return found[x] is not found[y]
            if found:  # only one of the two was found on this level
                return False
            level = [(n, c) for p, n in level for c in (n.left, n.right) if c]
        return False
|
[
"[email protected]"
] | |
d36c884945479b6457878265527fd5a0c4ee9cf7
|
c833c6e9c1e1a4c387763cac2cc75079341e3122
|
/ua/univer/lesson04/matrix7.py
|
595c9937287e1d3726564633d5bb16e4345a2be3
|
[] |
no_license
|
mmveres/python05_12_2020
|
1513aa97d577f4921655ce0e58f28811df2bd14e
|
d21e6d3ecd90bdc3bd2a9b780bb58b65deb671f1
|
refs/heads/master
| 2023-03-02T16:00:26.036232 | 2021-02-13T17:58:13 | 2021-02-13T17:58:13 | 318,794,570 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,104 |
py
|
# 7. The characteristic of a row of an integer matrix is defined as
# the sum of its positive elements.
# By permuting the rows of the given matrix, arrange them
# in order of increasing characteristic.
def sort_matrix(matrix):
    sum_row_list = []
    for row in matrix:
        row_sum = 0
        for cell in row:
            if cell > 0:  # the characteristic counts positive elements only
                row_sum += cell
        sum_row_list.append(row_sum)
    print(sum_row_list)
    # bubble sort by characteristic, swapping matrix rows in step
    for j in range(len(sum_row_list)-1):
        for i in range(len(sum_row_list)-1-j):
            if sum_row_list[i] > sum_row_list[i+1]:
                sum_row_list[i], sum_row_list[i+1] = sum_row_list[i+1], sum_row_list[i]
                matrix[i], matrix[i+1] = matrix[i+1], matrix[i]
    print(sum_row_list)


if __name__ == '__main__':
    matrix = [[1, 2, 3, 4],
              [2, 2, 7, 7],
              [1, 1, 1, 1]
              ]
    sort_matrix(matrix)
    print(matrix)
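# Expected output for the sample matrix: characteristics [10, 18, 4] before
# sorting, [4, 10, 18] after, and the matrix ends up as
# [[1, 1, 1, 1], [1, 2, 3, 4], [2, 2, 7, 7]].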
|
[
"[email protected]"
] | |
20f55a20d271b66ded1ca1334804084325a6f804
|
7451cdc3c0a82fbca55268e2456541ca527899ff
|
/bioutils.py
|
693c817d3af32b0376bd038094ad58b83cc8b4aa
|
[] |
no_license
|
sefakilic/genome_parsing
|
e59bd5893f9f81f358385e18bd81dcd072d49207
|
abc2a55e8f443dc629b2da9316d0bed58911068b
|
refs/heads/master
| 2021-01-01T19:01:16.575138 | 2013-01-08T21:12:55 | 2013-01-08T21:12:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,533 |
py
|
from Bio import Entrez
from Bio import SeqIO
from Bio.Seq import Seq
Entrez.email = "[email protected]"
def read_genbank(genome_accession_no, genbank_file=None):
"""Read genbank file. If the file is not given, based on the
genome_accession_no, grab it from NCBI and parse it. Return Sequence Record
object."""
if genbank_file:
print "reading genbank file %s" % genbank_file
seq_record = SeqIO.read(genbank_file, "genbank")
else:
print "downloading and parsing genbank file for %s" % genome_accession_no
handle = Entrez.efetch(db="nucleotide", rettype="gb",
retmode="text", id=genome_accession_no)
seq_record = SeqIO.read(handle, "gb")
handle.close()
return seq_record
def extract_genes(seq_record):
"""Given BioPython SeqRecord object as argument, return the list of all
genes where each gene is a SeqFeature object)"""
return [f for f in seq_record.features if f.type == "gene"]
def extract_cds(seq_record):
"""Given BioPython SeqRecord object as argument, return the list of all
coding sequences where each one is a SeqFeature object"""
return [f for f in seq_record.features if f.type == "CDS"]
def reverse_complement(seq):
    return str(Seq(seq).reverse_complement())
def split_len(seq, length):
"""Given a string, returns a list containing _length_ sized pieces of the seq. For
example, split_len('abcdefgh', 3) = ['abc', 'def', 'gh']"""
return [seq[i:i+length] for i in range(0, len(seq), length)]
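# Example usage (the accession number is hypothetical; fetching requires
# network access unless a local genbank file is supplied):
#
#   record = read_genbank("NC_000913.3")
#   print len(extract_genes(record)), len(extract_cds(record))
#   print split_len("abcdefgh", 3)  # ['abc', 'def', 'gh']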
|
[
"[email protected]"
] | |
fef2bd8363cb76ff03054cdf1de671925adeaa3a
|
24fe1f54fee3a3df952ca26cce839cc18124357a
|
/servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/eqpt/frupower5min.py
|
da373519b491b8e0ff8e9dc61ef72f8e2f8412c4
|
[] |
no_license
|
aperiyed/servicegraph-cloudcenter
|
4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff
|
9eb7975f2f6835e1c0528563a771526896306392
|
refs/heads/master
| 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 |
Python
|
UTF-8
|
Python
| false | false | 11,760 |
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class FruPower5min(Mo):
"""
A class that represents the most current statistics for FRU power in a 5 minute sampling interval. This class updates every 10 seconds.
"""
meta = StatsClassMeta("cobra.model.eqpt.FruPower5min", "FRU power")
counter = CounterMeta("drawn", CounterCategory.GAUGE, "watts", "power consumed")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "drawnLast"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "drawnMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "drawnMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "drawnAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "drawnSpct"
counter._propRefs[PropCategory.IMPLICIT_TOTAL] = "drawnTtl"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "drawnThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "drawnTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "drawnTr"
meta._counters.append(counter)
meta.moClassName = "eqptFruPower5min"
meta.rnFormat = "CDeqptFruPower5min"
meta.category = MoCategory.STATS_CURRENT
meta.label = "current FRU power stats in 5 minute"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.eqpt.SysC")
meta.parentClasses.add("cobra.model.eqpt.FC")
meta.parentClasses.add("cobra.model.eqpt.SupC")
meta.parentClasses.add("cobra.model.eqpt.ExtChCard")
meta.parentClasses.add("cobra.model.eqpt.LC")
meta.parentClasses.add("cobra.model.eqpt.Ft")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Curr")
meta.superClasses.add("cobra.model.eqpt.FruPower")
meta.rnPrefixes = [
('CDeqptFruPower5min', False),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "drawnAvg", "drawnAvg", 8285, PropCategory.IMPLICIT_AVG)
prop.label = "power consumed average value"
prop.isOper = True
prop.isStats = True
meta.props.add("drawnAvg", prop)
prop = PropMeta("str", "drawnLast", "drawnLast", 8282, PropCategory.IMPLICIT_LASTREADING)
prop.label = "power consumed current value"
prop.isOper = True
prop.isStats = True
meta.props.add("drawnLast", prop)
prop = PropMeta("str", "drawnMax", "drawnMax", 8284, PropCategory.IMPLICIT_MAX)
prop.label = "power consumed maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("drawnMax", prop)
prop = PropMeta("str", "drawnMin", "drawnMin", 8283, PropCategory.IMPLICIT_MIN)
prop.label = "power consumed minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("drawnMin", prop)
prop = PropMeta("str", "drawnSpct", "drawnSpct", 8286, PropCategory.IMPLICIT_SUSPECT)
prop.label = "power consumed suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("drawnSpct", prop)
prop = PropMeta("str", "drawnThr", "drawnThr", 8288, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "power consumed thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("drawnThr", prop)
prop = PropMeta("str", "drawnTr", "drawnTr", 8290, PropCategory.IMPLICIT_TREND)
prop.label = "power consumed trend"
prop.isOper = True
prop.isStats = True
meta.props.add("drawnTr", prop)
prop = PropMeta("str", "drawnTrBase", "drawnTrBase", 8289, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "power consumed trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("drawnTrBase", prop)
prop = PropMeta("str", "drawnTtl", "drawnTtl", 8287, PropCategory.IMPLICIT_TOTAL)
prop.label = "power consumed total sum"
prop.isOper = True
prop.isStats = True
meta.props.add("drawnTtl", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("EqptSlotToEPg", "EPG", "cobra.model.fv.EPg"))
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"[email protected]"
] | |
a4b9d47b216c8c2e086a0b545b18c0341dd97400
|
7970601ede43b4a35eb38aa4f04f55b20148af63
|
/migrations/schemas/app_schema.py
|
9026f0d5a5bec7659b38f032fae9a7946dc73cd8
|
[
"Apache-2.0"
] |
permissive
|
pythononwheels/copow
|
2d4423b12a8be4a13a06c4a7c55b9f2bb6bda542
|
49260b064223838962eb8cff4364580b3beb0067
|
refs/heads/master
| 2020-04-01T22:49:05.366052 | 2014-08-12T16:55:58 | 2014-08-12T16:55:58 | 20,783,739 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 614 |
py
|
#
#
# schema for the app model
# as an example there are already some attributes filled in.
# Generated: 2013/07/06 22:29:03
#
app = {
"name" : { "type" : "string", "default" : "#APPNAME" },
"path" : { "type" : "string" },
"lastversion" : { "type" : "integer" },
"currentversion" : { "type" : "integer" },
"maxversion" : { "type" : "integer" },
"last_updated" : { "type" : "date" },
"_id" : { "type" : "objectid" }
}
app_relations = {
#"comments" : "has_many"
}
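To make the schema's role concrete, here is a minimal sketch of how a document could be checked against a dict shaped like app; the check_types helper is hypothetical and not part of copow:
# Hypothetical helper (not part of copow): verify a document against a schema
# dict like `app`, where each field maps to {"type": ..., "default": ...}.
def check_types(doc, schema):
    simple = {"string": str, "integer": int}
    for field, spec in schema.items():
        if field in doc and spec["type"] in simple:
            assert isinstance(doc[field], simple[spec["type"]]), field
check_types({"name": "demo", "lastversion": 1}, app)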
|
[
"[email protected]"
] | |
7bd99954c1fecb313a53baa279142700dd5f728f
|
eba3e4a3935d6422d1ed85aaf69337f5ba15fc74
|
/pylons/tests/test_units/test_decorator_https.py
|
2275f5de94535a1ec3ecf161f07288940831d9e5
|
[
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause"
] |
permissive
|
arianepaola/tg2jython
|
2ae74250ca43b021323ef0951a9763712c2eb3d6
|
971b9c3eb8ca941d1797bb4b458f275bdca5a2cb
|
refs/heads/master
| 2021-01-21T12:07:48.815690 | 2009-03-27T02:38:11 | 2009-03-27T02:38:11 | 160,242 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,689 |
py
|
from paste.fixture import TestApp
from paste.registry import RegistryManager
from routes.middleware import RoutesMiddleware
from pylons.decorators.secure import https
from pylons.controllers import WSGIController
from pylons.testutil import ControllerWrap, SetupCacheGlobal
from __init__ import TestWSGIController
class HttpsController(WSGIController):
def index(self):
return 'index page'
index = https('/pylons')(index)
def login(self):
return 'login page'
login = https(controller='auth', action='login')(login)
def get(self):
return 'get page'
get = https()(get)
class TestHttpsDecorator(TestWSGIController):
def setUp(self):
TestWSGIController.setUp(self)
from routes import Mapper
map = Mapper()
map.connect('/:action')
map.connect('/:action/:id')
map.connect('/:controller/:action/:id')
map.connect('/:controller/:action')
app = ControllerWrap(HttpsController)
app = SetupCacheGlobal(app, self.environ, setup_cache=False)
app = RoutesMiddleware(app, map)
app = RegistryManager(app)
self.app = TestApp(app)
def test_https_explicit_path(self):
self.environ['pylons.routes_dict']['action'] = 'index'
response = self.app.get('/index', status=302)
assert response.header_dict.get('location') == \
'https://localhost/pylons'
self.environ['wsgi.url_scheme'] = 'https'
response = self.app.get('/index', status=200)
assert 'location' not in response.header_dict
assert 'index page' in response
def test_https_disallows_post(self):
self.environ['pylons.routes_dict']['action'] = 'index'
response = self.app.post('/index', status=405)
def test_https_url_for_kwargs(self):
self.environ['pylons.routes_dict']['action'] = 'login'
response = self.app.get('/login', status=302)
assert response.header_dict.get('location') == \
'https://localhost/auth/login'
self.environ['wsgi.url_scheme'] = 'https'
response = self.app.get('/login', status=200)
assert 'location' not in response.header_dict
assert 'login page' in response
def test_https_redirect_to_self(self):
self.environ['pylons.routes_dict']['action'] = 'get'
response = self.app.get('/get', status=302)
assert response.header_dict.get('location') == \
'https://localhost/get'
self.environ['wsgi.url_scheme'] = 'https'
response = self.app.get('/get', status=200)
assert 'location' not in response.header_dict
assert 'get page' in response
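For contrast, a minimal sketch of the policy these tests pin down; this illustrates only the asserted behavior and is not Pylons' actual https implementation:
# Hypothetical sketch of the behavior asserted above: GET over http redirects
# (302) to the https URL, GET over https passes through, and POST over http is
# refused (405) so form data never travels in the clear.
def https_policy(environ, target_url):
    if environ.get('wsgi.url_scheme') == 'https':
        return None  # already secure; run the controller action
    if environ.get('REQUEST_METHOD') == 'POST':
        return ('405 Method Not Allowed', None)
    return ('302 Found', target_url)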
|
[
"ariane@venus.(none)"
] |
ariane@venus.(none)
|
a69a9f68f25096e15e868d3dff4bcf6438075a19
|
d1ab7452d6449a4d6b99177a2c1d44d3231283c5
|
/reagent/gym/tests/test_gym.py
|
e24b00f736409f7cfff6788607410c22c0d570ed
|
[
"BSD-3-Clause"
] |
permissive
|
ojaswa-privado/ReAgent
|
67e3fb7d52b39e6feb4ab4537691d20c99fde323
|
e990e66f69369cbe89212e334191180716c9bf4e
|
refs/heads/master
| 2023-03-24T22:20:52.342277 | 2021-03-18T12:38:23 | 2021-03-18T12:38:23 | 348,598,068 | 0 | 0 |
BSD-3-Clause
| 2021-03-18T12:38:24 | 2021-03-17T06:05:53 | null |
UTF-8
|
Python
| false | false | 13,455 |
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import os
import pprint
import unittest
from typing import Optional, Dict, Any
import numpy as np
import pytest
import pytorch_lightning as pl
import torch
from parameterized import parameterized
from reagent.core.tensorboardX import summary_writer_context
from reagent.gym.agents.agent import Agent
from reagent.gym.agents.post_episode import train_post_episode
from reagent.gym.datasets.episodic_dataset import (
EpisodicDataset,
)
from reagent.gym.datasets.replay_buffer_dataset import ReplayBufferDataset
from reagent.gym.envs import Env__Union
from reagent.gym.envs.env_wrapper import EnvWrapper
from reagent.gym.policies.policy import Policy
from reagent.gym.runners.gymrunner import evaluate_for_n_episodes, run_episode
from reagent.gym.types import PostEpisode, PostStep
from reagent.gym.utils import build_normalizer, fill_replay_buffer
from reagent.replay_memory.circular_replay_buffer import ReplayBuffer
from reagent.test.base.horizon_test_base import HorizonTestBase
from reagent.training.trainer import Trainer
from reagent.workflow.model_managers.union import ModelManager__Union
from reagent.workflow.types import RewardOptions
from torch.utils.tensorboard import SummaryWriter
from tqdm import trange
# for seeding the environment
SEED = 0
# exponential moving average parameter for tracking reward progress
REWARD_DECAY = 0.8
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
"""
Put on-policy gym tests here in the format (test name, path to yaml config).
Format path to be: "configs/<env_name>/<model_name>_<env_name>_online.yaml."
NOTE: These tests should ideally finish quickly (within 10 minutes) since they are
unit tests which are run many times.
"""
REPLAY_BUFFER_GYM_TESTS = [
("Discrete CRR Cartpole", "configs/cartpole/discrete_crr_cartpole_online.yaml"),
("Discrete DQN Cartpole", "configs/cartpole/discrete_dqn_cartpole_online.yaml"),
("Discrete C51 Cartpole", "configs/cartpole/discrete_c51_cartpole_online.yaml"),
("Discrete QR Cartpole", "configs/cartpole/discrete_qr_cartpole_online.yaml"),
(
"Discrete DQN Open Gridworld",
"configs/open_gridworld/discrete_dqn_open_gridworld.yaml",
),
("SAC Pendulum", "configs/pendulum/sac_pendulum_online.yaml"),
("Continuous CRR Pendulum", "configs/pendulum/continuous_crr_pendulum_online.yaml"),
("TD3 Pendulum", "configs/pendulum/td3_pendulum_online.yaml"),
("Parametric DQN Cartpole", "configs/cartpole/parametric_dqn_cartpole_online.yaml"),
(
"Parametric SARSA Cartpole",
"configs/cartpole/parametric_sarsa_cartpole_online.yaml",
),
(
"Sparse DQN Changing Arms",
"configs/sparse/discrete_dqn_changing_arms_online.yaml",
),
("SlateQ RecSim", "configs/recsim/slate_q_recsim_online.yaml"),
("PossibleActionsMask DQN", "configs/functionality/dqn_possible_actions_mask.yaml"),
]
ONLINE_EPISODE_GYM_TESTS = [
(
"REINFORCE Cartpole online",
"configs/cartpole/discrete_reinforce_cartpole_online.yaml",
),
(
"PPO Cartpole online",
"configs/cartpole/discrete_ppo_cartpole_online.yaml",
),
]
curr_dir = os.path.dirname(__file__)
class TestGym(HorizonTestBase):
# pyre-fixme[16]: Module `parameterized` has no attribute `expand`.
@parameterized.expand(REPLAY_BUFFER_GYM_TESTS)
def test_replay_buffer_gym_cpu(self, name: str, config_path: str):
logger.info(f"Starting {name} on CPU")
self.run_from_config(
run_test=run_test_replay_buffer,
config_path=os.path.join(curr_dir, config_path),
use_gpu=False,
)
logger.info(f"{name} passes!")
# pyre-fixme[16]: Module `parameterized` has no attribute `expand`.
@parameterized.expand(REPLAY_BUFFER_GYM_TESTS)
@pytest.mark.serial
# pyre-fixme[56]: Argument `not torch.cuda.is_available()` to decorator factory
# `unittest.skipIf` could not be resolved in a global scope.
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
def test_replay_buffer_gym_gpu(self, name: str, config_path: str):
logger.info(f"Starting {name} on GPU")
self.run_from_config(
run_test=run_test_replay_buffer,
config_path=os.path.join(curr_dir, config_path),
use_gpu=True,
)
logger.info(f"{name} passes!")
# pyre-fixme[16]: Module `parameterized` has no attribute `expand`.
@parameterized.expand(ONLINE_EPISODE_GYM_TESTS)
def test_online_episode_gym_cpu(self, name: str, config_path: str):
logger.info(f"Starting {name} on CPU")
self.run_from_config(
run_test=run_test_online_episode,
config_path=os.path.join(curr_dir, config_path),
use_gpu=False,
)
logger.info(f"{name} passes!")
def train_policy(
env: EnvWrapper,
training_policy: Policy,
num_train_episodes: int,
post_step: Optional[PostStep] = None,
post_episode: Optional[PostEpisode] = None,
use_gpu: bool = False,
) -> np.ndarray:
device = torch.device("cuda") if use_gpu else torch.device("cpu")
agent = Agent.create_for_env(
env,
policy=training_policy,
post_transition_callback=post_step,
post_episode_callback=post_episode,
device=device,
)
running_reward = 0
writer = SummaryWriter()
with summary_writer_context(writer):
train_rewards = []
with trange(num_train_episodes, unit=" epoch") as t:
for i in t:
# Note: run_episode also performs a training step for the agent, if specified in post_step
trajectory = run_episode(env=env, agent=agent, mdp_id=i, max_steps=200)
ep_reward = trajectory.calculate_cumulative_reward()
train_rewards.append(ep_reward)
running_reward *= REWARD_DECAY
running_reward += (1 - REWARD_DECAY) * ep_reward
t.set_postfix(reward=running_reward)
logger.info("============Train rewards=============")
logger.info(train_rewards)
logger.info(f"average: {np.mean(train_rewards)};\tmax: {np.max(train_rewards)}")
return np.array(train_rewards)
def eval_policy(
env: EnvWrapper,
serving_policy: Policy,
num_eval_episodes: int,
serving: bool = True,
) -> np.ndarray:
agent = (
Agent.create_for_env_with_serving_policy(env, serving_policy)
if serving
else Agent.create_for_env(env, serving_policy)
)
eval_rewards = evaluate_for_n_episodes(
n=num_eval_episodes,
env=env,
agent=agent,
max_steps=env.max_steps,
num_processes=1,
).squeeze(1)
logger.info("============Eval rewards==============")
logger.info(eval_rewards)
mean_eval = np.mean(eval_rewards)
logger.info(f"average: {mean_eval};\tmax: {np.max(eval_rewards)}")
return np.array(eval_rewards)
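# ReplayBufferDataset already yields fully formed training batches, so the
# DataLoader used below must not re-collate them; identity_collate simply
# unwraps the one-element list that DataLoader wraps around each batch.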
def identity_collate(batch):
assert isinstance(batch, list) and len(batch) == 1, f"Got {batch}"
return batch[0]
def run_test_replay_buffer(
env: Env__Union,
model: ModelManager__Union,
replay_memory_size: int,
train_every_ts: int,
train_after_ts: int,
num_train_episodes: int,
passing_score_bar: float,
num_eval_episodes: int,
use_gpu: bool,
minibatch_size: Optional[int] = None,
):
"""
Run an online learning test with a replay buffer. The replay buffer is pre-filled, then the training starts.
Each transition is added to the replay buffer immediately after it takes place.
"""
env = env.value
# pyre-fixme[16]: Module `pl` has no attribute `seed_everything`.
pl.seed_everything(SEED)
env.seed(SEED)
env.action_space.seed(SEED)
normalization = build_normalizer(env)
logger.info(f"Normalization is: \n{pprint.pformat(normalization)}")
manager = model.value
trainer = manager.initialize_trainer(
use_gpu=use_gpu,
reward_options=RewardOptions(),
normalization_data_map=normalization,
)
training_policy = manager.create_policy(serving=False)
# pyre-fixme[16]: Module `pl` has no attribute `LightningModule`.
if not isinstance(trainer, pl.LightningModule):
if minibatch_size is None:
minibatch_size = trainer.minibatch_size
assert minibatch_size == trainer.minibatch_size
assert minibatch_size is not None
replay_buffer = ReplayBuffer(
replay_capacity=replay_memory_size, batch_size=minibatch_size
)
device = torch.device("cuda") if use_gpu else torch.device("cpu")
# first fill the replay buffer using random policy
train_after_ts = max(train_after_ts, minibatch_size)
fill_replay_buffer(
env=env, replay_buffer=replay_buffer, desired_size=train_after_ts
)
agent = Agent.create_for_env(env, policy=training_policy, device=device)
# TODO: Simplify this setup by creating LightningDataModule
dataset = ReplayBufferDataset.create_for_trainer(
trainer,
env,
agent,
replay_buffer,
batch_size=minibatch_size,
training_frequency=train_every_ts,
num_episodes=num_train_episodes,
max_steps=200,
device=device,
)
data_loader = torch.utils.data.DataLoader(dataset, collate_fn=identity_collate)
# pyre-fixme[16]: Module `pl` has no attribute `Trainer`.
pl_trainer = pl.Trainer(max_epochs=1, gpus=int(use_gpu))
# Note: the fit() function below also evaluates the agent along the way
# and adds the new transitions to the replay buffer, so it is training
# on incrementally larger and larger buffers.
pl_trainer.fit(trainer, data_loader)
# TODO: Also check train_reward
serving_policy = manager.create_policy(serving=True)
eval_rewards = eval_policy(env, serving_policy, num_eval_episodes, serving=True)
assert (
eval_rewards.mean() >= passing_score_bar
), f"Eval reward is {eval_rewards.mean()}, less than < {passing_score_bar}.\n"
def run_test_online_episode(
env: Env__Union,
model: ModelManager__Union,
num_train_episodes: int,
passing_score_bar: float,
num_eval_episodes: int,
use_gpu: bool,
):
"""
Run an online learning test. At the end of each episode training is run on the trajectory.
"""
env = env.value
# pyre-fixme[16]: Module `pl` has no attribute `seed_everything`.
pl.seed_everything(SEED)
env.seed(SEED)
env.action_space.seed(SEED)
normalization = build_normalizer(env)
logger.info(f"Normalization is: \n{pprint.pformat(normalization)}")
manager = model.value
trainer = manager.initialize_trainer(
use_gpu=use_gpu,
reward_options=RewardOptions(),
normalization_data_map=normalization,
)
policy = manager.create_policy(serving=False)
device = torch.device("cuda") if use_gpu else torch.device("cpu")
agent = Agent.create_for_env(env, policy, device=device)
# pyre-fixme[16]: Module `pl` has no attribute `LightningModule`.
if isinstance(trainer, pl.LightningModule):
# pyre-fixme[16]: Module `pl` has no attribute `Trainer`.
pl_trainer = pl.Trainer(max_epochs=1, gpus=int(use_gpu), deterministic=True)
dataset = EpisodicDataset(
env=env, agent=agent, num_episodes=num_train_episodes, seed=SEED
)
pl_trainer.fit(trainer, dataset)
else:
post_episode_callback = train_post_episode(env, trainer, use_gpu)
_ = train_policy(
env,
policy,
num_train_episodes,
post_step=None,
post_episode=post_episode_callback,
use_gpu=use_gpu,
)
eval_rewards = evaluate_for_n_episodes(
n=num_eval_episodes,
env=env,
agent=agent,
max_steps=env.max_steps,
num_processes=1,
).squeeze(1)
assert (
eval_rewards.mean() >= passing_score_bar
), f"Eval reward is {eval_rewards.mean()}, less than < {passing_score_bar}.\n"
def run_test_episode_buffer(
env: EnvWrapper,
policy: Policy,
trainer: Trainer,
num_train_episodes: int,
passing_score_bar: float,
num_eval_episodes: int,
use_gpu: bool = False,
):
# pyre-fixme[16]: Module `pl` has no attribute `seed_everything`.
pl.seed_everything(SEED)
env.seed(SEED)
env.action_space.seed(SEED)
post_episode_callback = train_post_episode(env, trainer, use_gpu)
train_rewards = train_policy(
env,
policy,
num_train_episodes,
post_step=None,
post_episode=post_episode_callback,
use_gpu=use_gpu,
)
# Check whether the max score passed the score bar; since we explore during
# training, individual returns can be poor (leading to flakiness in C51 and QRDQN).
assert np.max(train_rewards) >= passing_score_bar, (
f"max reward ({np.max(train_rewards)}) after training for "
f"{len(train_rewards)} episodes is less than < {passing_score_bar}.\n"
)
serving_policy = policy
eval_rewards = eval_policy(env, serving_policy, num_eval_episodes, serving=False)
assert (
eval_rewards.mean() >= passing_score_bar
), f"Eval reward is {eval_rewards.mean()}, less than < {passing_score_bar}.\n"
if __name__ == "__main__":
unittest.main()
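# To run a single parameterized case in isolation (assuming pytest as the
# runner, per the @pytest.mark.serial marker above):
#   pytest reagent/gym/tests/test_gym.py -k test_replay_buffer_gym_cpu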
|
[
"[email protected]"
] | |
b52ec39cb80d05dec46a86f5a6d0b39823d6ce17
|
5eb9c3473902c20eac6401ec234d0ec496b19b90
|
/tests/test_utils.py
|
c04b49a12b01d7c30beaf52edfbc41b51a4d380f
|
[
"ISC"
] |
permissive
|
associatedpress/datakit-project
|
236650f61621a6403581d86664d403a096acd0d6
|
6906a3da3a957cfdc3c798a47dcd9e9cf5166f8f
|
refs/heads/main
| 2023-01-15T13:03:42.766693 | 2022-12-06T14:57:02 | 2022-12-06T14:57:02 | 80,869,394 | 25 | 4 |
ISC
| 2022-12-26T19:45:41 | 2017-02-03T21:08:31 |
Python
|
UTF-8
|
Python
| false | false | 1,103 |
py
|
import os
import cookiecutter.config as cc_config
import pytest
from datakit_project.utils import resolve_repo_dir
# TODO: Update resolve_repo_dir to use cookiecutter DEFAULT_CONFIG
# then monkeypatch the variable here
def test_repo_dir_for_local_repo():
"""
Should be fully-qualified path to local directory
"""
local_dir = '/Local/path/to/fake-repo'
actual_dir = resolve_repo_dir(local_dir)
assert local_dir == actual_dir
def test_repo_dir_for_alias():
"""
Should be path inside of cookiecutter's dir.
"""
cc_home = cc_config.DEFAULT_CONFIG['cookiecutters_dir']
expected_dir = os.path.join(cc_home, 'fake-repo')
actual_dir = resolve_repo_dir('gh:associatedpress/fake-repo')
assert expected_dir == actual_dir
def test_repo_dir_for_url():
"""
Should be path inside of cookiecutter's dir.
"""
cc_home = cc_config.DEFAULT_CONFIG['cookiecutters_dir']
expected_dir = os.path.join(cc_home, 'fake-repo')
actual_dir = resolve_repo_dir('https://github.com/associatedpress/fake-repo.git')
assert expected_dir == actual_dir
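A hypothetical sketch of what resolve_repo_dir must do, inferred only from the three tests above (the real implementation may differ):
import os
import cookiecutter.config as cc_config
def resolve_repo_dir_sketch(repo):
    # Local absolute paths are returned unchanged (POSIX-style, per the test).
    if repo.startswith(os.sep):
        return repo
    # Aliases ("gh:owner/name") and clone URLs resolve to a directory named
    # after the repo, minus any ".git" suffix, inside cookiecutters_dir.
    name = repo.rstrip('/').rsplit('/', 1)[-1]
    if name.endswith('.git'):
        name = name[:-4]
    return os.path.join(cc_config.DEFAULT_CONFIG['cookiecutters_dir'], name)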
|
[
"[email protected]"
] | |
124d53870ab1f32476f7396ada3d0a47298746de
|
162eed4191aef4431f94a0db1ad4185b6daa6f67
|
/supervised_learning/0x02-tensorflow/5-create_train_op.py
|
05322a25cd88afe944df96ecf16ba5df91cd92c0
|
[] |
no_license
|
giovannyortegon/holbertonschool-machine_learning
|
d6897bfb492f9d266302930927416637be3c172d
|
8cd5e0f837a5c0facbf73647dcc9c6a3b1b1b9e0
|
refs/heads/master
| 2022-12-31T15:34:20.489690 | 2020-10-24T03:37:01 | 2020-10-24T03:37:01 | 279,656,017 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 501 |
py
|
#!/usr/bin/env python3
""" train operation """
import tensorflow as tf
def create_train_op(loss, alpha):
""" create_train_op - creates the training operation.
Args:
loss is the loss of the network’s prediction
alpha is the learning rate
Returns:
an operation that trains the network using gradient descent
"""
train_op = tf.train.GradientDescentOptimizer(alpha)
grads = train_op.compute_gradients(loss)
return train_op.apply_gradients(grads)
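A minimal TF1-style usage sketch; the linear model, shapes, and learning rate below are invented for illustration:
import tensorflow as tf
x = tf.placeholder(tf.float32, shape=(None, 3))
y = tf.placeholder(tf.float32, shape=(None, 1))
w = tf.Variable(tf.zeros((3, 1)))
loss = tf.reduce_mean(tf.square(tf.matmul(x, w) - y))
train_op = create_train_op(loss, 0.01)
# compute_gradients followed directly by apply_gradients, as in
# create_train_op above, is equivalent to optimizer.minimize(loss).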
|
[
"[email protected]"
] | |
3cbe850887808dfaf2d2e3096c804e2335696bb5
|
556357a07c95176d8aa260795eb99b15970a1135
|
/AE/TY_06_REPORT_PART_2.py
|
f5f3618610c62d4d81891a755b503c33c3f2e628
|
[] |
no_license
|
sanjaymanegit/testRepo
|
db11ba5d02d47b78f6c9762c9a0628b22946e446
|
92a02cbe0add7d14b751b79e612e85a062e37498
|
refs/heads/master
| 2023-09-02T07:46:35.164047 | 2023-08-29T12:53:49 | 2023-08-29T12:53:49 | 42,922,622 | 1 | 1 | null | 2023-04-16T18:45:53 | 2015-09-22T08:53:13 |
Python
|
UTF-8
|
Python
| false | false | 75,157 |
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'TY_06_REPORT_PART_2.ui'
#
# Created by: PyQt5 UI code generator 5.12.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.Qt import QTableWidgetItem
import sqlite3
class TY_06_Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1366, 768)
MainWindow.setBaseSize(QtCore.QSize(15, 11))
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.frame = QtWidgets.QFrame(self.centralwidget)
self.frame.setGeometry(QtCore.QRect(30, 30, 1331, 705))
self.frame.setStyleSheet("background-color: rgb(215, 255, 252);")
'''
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
'''
self.frame.setFrameShape(QtWidgets.QFrame.Box)
self.frame.setFrameShadow(QtWidgets.QFrame.Plain)
self.frame.setLineWidth(3)
#self.frame.setStyleSheet("background-color: rgb(221, 255, 234);")
self.frame.setObjectName("frame")
self.shape=""
self.unit_typex=""
self.lastIndex=13
self.shear_mod_ip=""
self.label_6 = QtWidgets.QLabel(self.frame)
self.label_6.setGeometry(QtCore.QRect(540, 30, 211, 41))
font = QtGui.QFont()
font.setFamily("MS Sans Serif")
font.setPointSize(16)
self.label_6.setFont(font)
self.label_6.setStyleSheet("color: rgb(0, 85, 255);")
self.label_6.setAlignment(QtCore.Qt.AlignCenter)
self.label_6.setObjectName("label_6")
self.label_6_1 = QtWidgets.QLabel(self.frame)
self.label_6_1.setGeometry(QtCore.QRect(840, 30, 351, 41))
font = QtGui.QFont()
font.setFamily("MS Sans Serif")
font.setPointSize(10)
self.label_6_1.setFont(font)
#self.label_6.setStyleSheet("color: rgb(0, 85, 255);")
self.label_6_1.setAlignment(QtCore.Qt.AlignCenter)
self.label_6_1.setObjectName("label_6_1")
self.pushButton_14 = QtWidgets.QPushButton(self.frame)
self.pushButton_14.setGeometry(QtCore.QRect(570, 600, 131, 41))
font = QtGui.QFont()
font.setFamily("MS Sans Serif")
font.setPointSize(10)
self.pushButton_14.setFont(font)
self.pushButton_14.setObjectName("pushButton_14")
self.tableWidget = QtWidgets.QTableWidget(self.frame)
font = QtGui.QFont()
font.setPointSize(10)
self.tableWidget.setFont(font)
self.tableWidget.setGeometry(QtCore.QRect(20, 111, 1291, 411))
self.tableWidget.setObjectName("tableWidget")
#self.tableWidget.setStyleSheet("background-color: rgb(221, 255, 234);")
self.tableWidget.setColumnCount(0)
self.tableWidget.setRowCount(0)
'''
self.tableWidget_2 = QtWidgets.QTableWidget(self.frame)
self.tableWidget_2.setGeometry(QtCore.QRect(670, 110, 641, 411))
self.tableWidget_2.setObjectName("tableWidget_2")
self.tableWidget_2.setColumnCount(0)
self.tableWidget_2.setRowCount(0)
'''
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1366, 21))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.test_type=""
self.def_flg=""
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.label_6.setText(_translate("MainWindow", "Report Part II"))
self.label_6_1.setText(_translate("MainWindow", " [ Test Id: 265 ] [ Batch Id : 3452321qwe ] "))
self.pushButton_14.setText(_translate("MainWindow", "Return"))
self.pushButton_14.clicked.connect(MainWindow.close)
self.def_flg=""
#self.select_all_rows_2()
connection = sqlite3.connect("tyr.db")
results=connection.execute("SELECT TEST_ID,BATCH_ID,TEST_TYPE,DEF_FLG FROM TEST_MST WHERE TEST_ID IN (SELECT NEW_REPORT_TEST_ID FROM GLOBAL_VAR)")
for x in results:
self.label_6_1.setText("[ Test Id: "+str(x[0])+" ] [ Batch Id:"+str(x[1])+" ]")
self.test_type=str(x[2])
self.def_flg=str(x[3])
connection.close()
if(self.test_type=="Compress"):
self.select_all_rows_compress()
elif(self.test_type=="Tear"):
self.select_all_rows_tear()
elif(self.test_type=="Flexural"):
self.select_all_rows_flexural()
elif(self.test_type=="QLSS"):
self.select_all_rows_qlss()
elif(self.test_type=="ILSS"):
self.select_all_rows_ilss()
elif(self.test_type=="COF"):
self.select_all_rows_cof()
else:
if(self.def_flg=="Y"):
self.guage_select_all_rows()
else:
self.select_all_rows()
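# delete_all_records (below) clears every existing row from the table widget
# so the report grid can be rebuilt from scratch for the selected test.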
def delete_all_records(self):
i = self.tableWidget.rowCount()
while (i>0):
i=i-1
self.tableWidget.removeRow(i)
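# select_all_rows builds the tensile report table: the column set depends on
# the specimen SHAPE (Rectangle / Cylindrical / Pipe / DirectValue) and on the
# unit system (Lb/Inch, Newton/Mm, MPA, or Kgf/Cm2); aggregate rows come from
# REPORT_PART_2_AGGR and per-specimen rows from REPORT_PART_2.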
def select_all_rows(self):
self.delete_all_records()
self.tableWidget.setMidLineWidth(-4)
self.tableWidget.setGridStyle(QtCore.Qt.SolidLine)
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(14)
font = QtGui.QFont()
font.setPointSize(10)
self.tableWidget.setFont(font)
self.tableWidget.horizontalHeader().setStyleSheet("QHeaderView { font-size: 10pt};")
self.tableWidget.verticalHeader().setStyleSheet("QHeaderView { font-size: 10pt};")
#self.tableWidget.horizontalHeader().setStyleSheet("::section {background-color : lightGray;font-size:10pt;}")
#self.tableWidget.setRowCount(1)
#self.tableWidget.resizeColumnsToContents()
#self.tableWidget.resizeRowsToContents()
self.tableWidget.setEditTriggers(QtWidgets.QTableWidget.NoEditTriggers)
self.tableWidget.horizontalHeader().setStretchLastSection(True)
self.tableWidget.setColumnWidth(0, 50)
self.tableWidget.setColumnWidth(1, 80)
self.tableWidget.setColumnWidth(2, 80)
self.tableWidget.setColumnWidth(3, 80)
self.tableWidget.setColumnWidth(4, 120)
self.tableWidget.setColumnWidth(5, 80)
self.tableWidget.setColumnWidth(6, 80)
self.tableWidget.setColumnWidth(7, 120)
self.tableWidget.setColumnWidth(8, 80)
self.tableWidget.setColumnWidth(9, 120)
self.tableWidget.setColumnWidth(10, 100)
self.tableWidget.setColumnWidth(11, 100)
self.tableWidget.setColumnWidth(12, 100)
self.tableWidget.setColumnWidth(13, 100)
connection = sqlite3.connect("tyr.db")
results=connection.execute("SELECT STG_GRAPH_TYPE,STG_UNIT_TYPE FROM GLOBAL_REPORTS_PARAM")
for x in results:
self.unit_typex=x[1]
connection.close()
self.tableWidget.horizontalHeader().setStretchLastSection(True)
# SELECT SHAPE FROM SPECIMEN_MST WHERE SPECIMEN_NAME IN ( SELECT SPECIMEN_NAME FROM TEST_MST WHERE TEST_ID IN (SELECT NEW_REPORT_TEST_ID FROM GLOBAL_VAR))
connection = sqlite3.connect("tyr.db")
results=connection.execute("SELECT SHAPE FROM SPECIMEN_MST WHERE SPECIMEN_NAME IN ( SELECT SPECIMEN_NAME FROM TEST_MST WHERE TEST_ID IN (SELECT NEW_REPORT_TEST_ID FROM GLOBAL_VAR))")
for x in results:
self.shape=x[0]
connection.close()
#self.shape='Pipe'
print ("shape :"+self.shape)
if (self.shape=="Rectangle"):
if(self.unit_typex=="Lb/Inch"):
self.tableWidget.setHorizontalHeaderLabels(['Spe.No.', ' Thickness \n (Inch) ', ' Width \n (Inch) ', 'CS.Area \n (Inch2)','Force at Peak \n (Lb)' ,'E@Peak \n (Inch)','% E@Peak','E@Break \n (Inch)','% E@Break','Tensile Strength \n (Lb/Inch2)','Mod@100% \n (Lb/Inch2)','Mod@200% \n (Lb/Inch2)','Mod@300% \n (Lb/Inch2)','Mod % (Lb/Inch2)' ])
elif(self.unit_typex == "Newton/Mm"):
self.tableWidget.setHorizontalHeaderLabels(['Spe.No.', ' Thickness \n (mm) ', ' Width \n (mm) ', 'CS.Area \n (mm2)','Force at Peak \n (N)' ,'E@Peak \n (mm)','% E@Peak','E@Break \n (mm)','% E@Break','Tensile Strength \n (N/Mm2)','Mod@100% \n (N/Mm2)','Mod@200% \n (N/Mm2)','Mod@300% \n (N/Mm2)','Mod %'])
elif(self.unit_typex == "MPA"):
self.tableWidget.setHorizontalHeaderLabels(['Spe.No.', ' Thickness \n (mm) ', ' Width \n (mm) ', 'CS.Area \n (mm2)','Force at Peak \n (N)' ,'E@Peak \n (mm)','% E@Peak','E@Break \n (mm)','% E@Break','Tensile Strength \n (MPA)','Mod@100% \n (MPA)','Mod@200% \n (MPA)','Mod@300% \n (MPA)','Mod %'])
else:
self.tableWidget.setHorizontalHeaderLabels(['Spe.No.', ' Thickness \n (cm) ', ' Width \n (cm) ', 'CS.Area \n (cm2)','Force at Peak \n (Kgf)' ,'E@Peak \n (cm)','% E@Peak','E@Break \n (cm)','% E@Break','Tensile Strength \n (Kgf/Cm2)','Mod@100% \n (Kgf/Cm2)','Mod@200% \n (Kgf/Cm2)','Mod@300% \n (Kgf/Cm2)','Mod %'])
elif (self.shape=="Cylindrical"):
self.tableWidget.setColumnCount(13)
self.lastIndex=12
if(self.unit_typex=="Lb/Inch"):
self.tableWidget.setHorizontalHeaderLabels(['Spe. No.', 'Diameter \n (Inch)', 'CS.Area \n (Inch2)','Force at Peak \n (Lb)' ,'E@Peak \n (Inch)','% E@Peak','E@Break \n (Inch)','% E@Break','Tensile Strength \n (Lb/Inch2)','Mod@100% \n (Lb/Inch2)','Mod@200% \n (Lb/Inch2)','Mod@300% \n (Lb/Inch2)','Mod % (Lb/Inch2)'])
elif(self.unit_typex == "Newton/Mm"):
self.tableWidget.setHorizontalHeaderLabels(['Spe. No.', 'Diameter \n (mm)', 'CS.Area \n (mm2)','Force at Peak \n (N)' ,'E@Peak \n (mm)','% E@Peak','E@Break \n (mm)','% E@Break','Tensile Strength \n (N/Mm2)','Mod@100% \n (N/Mm2)','Mod@200% \n (N/Mm2)','Mod@300% \n (N/Mm2)','Mod %'])
elif(self.unit_typex == "MPA"):
self.tableWidget.setHorizontalHeaderLabels(['Spe. No.', 'Diameter \n (mm)', 'CS.Area \n (mm2)','Force at Peak \n (N)' ,'E@Peak \n (mm)','% E@Peak','E@Break \n (mm)','% E@Break','Tensile Strength \n (MPA)','Mod@100% \n (MPA)','Mod@200% \n (MPA)','Mod@300% \n (MPA)','Mod %'])
else:
self.tableWidget.setHorizontalHeaderLabels(['Spe. No.', 'Diameter \n (cm)', 'CS.Area \n (cm2)','Force at Peak \n (Kg)' ,'E@Peak \n (cm)','% E@Peak','E@Break \n (cm)','% E@Break','Tensile Strength \n (Kg/Cm2)','Mod@100% \n (Kg/Cm2)','Mod@200% \n (Kg/Cm2)','Mod@300% \n (Kg/Cm2)','Mod %'])
elif (self.shape=="Pipe"):
self.tableWidget.setColumnCount(14)
self.lastIndex=13
if(self.unit_typex=="Lb/Inch"):
self.tableWidget.setHorizontalHeaderLabels(['Spe. No.', 'Inn.Diameter \n (Inch)', 'Out. Diameter \n (Inch)', 'CS.Area \n (Inch2)','Force at Peak \n (Lb)' ,'E@Peak \n (Inch)','% E@Peak','E@Break \n (Inch)','% E@Break','Tensile Strength \n (Lb/Inch2)','Mod@100% \n (Lb/Inch2)','Mod@200% \n (Lb/Inch2)','Mod@300% \n (Lb/Inch2)','Mod % (Lb/Inch2)'])
elif(self.unit_typex == "Newton/Mm"):
self.tableWidget.setHorizontalHeaderLabels(['Spe. No.', 'Inn.Diameter \n (mm)', 'Out. Diameter \n (mm)', 'CS.Area \n (mm2)','Force at Peak \n (N)' ,'E@Peak \n (mm)','% E@Peak','E@Break \n (mm)','% E@Break','Tensile Strength \n (N/Mm2)','Mod@100% \n (N/Mm2)','Mod@200% \n (N/Mm2)','Mod@300% \n (N/Mm2)','Mod %'])
elif(self.unit_typex == "MPA"):
self.tableWidget.setHorizontalHeaderLabels(['Spe. No.', 'Inn.Diameter \n (mm)', 'Out. Diameter \n (mm)', 'CS.Area \n (mm2)','Force at Peak \n (N)' ,'E@Peak \n (mm)','% E@Peak','E@Break \n (mm)','% E@Break','Tensile Strength \n (MPA)','Mod@100% \n (MPA)','Mod@200% \n (MPA)','Mod@300% \n (MPA)','Mod %'])
else:
self.tableWidget.setHorizontalHeaderLabels(['Spe. No.', 'Inn.Diameter \n (cm)', 'Out. Diameter \n (cm)', 'CS.Area \n (cm2)','Force at Peak \n (Kgf)' ,'E@Peak \n (cm)','% E@Peak','E@Break \n (cm)','% E@Break','Tensile Strength \n (Kgf/Cm2)','Mod@100% \n (Kgf/Cm2)','Mod@200% \n (Kgf/Cm2)','Mod@300% \n (Kgf/Cm2)','Mod %'])
elif (self.shape=="DirectValue"):
self.tableWidget.setColumnCount(12)
self.lastIndex=11
if(self.unit_typex=="Lb/Inch"):
#print("header")
self.tableWidget.setHorizontalHeaderLabels(['Spe. No.', 'CS.Area \n (Inch2)','Force at Peak \n (Lb)' ,'E@Peak \n (Inch)','% E@Peak','E@Break \n (Inch)','% E@Break','Tensile Strength \n (Lb/Inch2)','Mod@100% \n (Lb/Inch2)','Mod@200% \n (Lb/Inch2)','Mod@300% \n (Lb/Inch2)','Mod %'])
elif(self.unit_typex == "Newton/Mm"):
self.tableWidget.setHorizontalHeaderLabels(['Spe. No.', 'CS.Area \n (mm2)','Force at Peak \n (N)' ,'E@Peak \n (mm)','% E@Peak','E@Break \n (mm)','% E@Break','Tensile Strength \n (N/Mm2)','Mod@100% \n (N/Mm2)','Mod@200% \n (N/Mm2)','Mod@300% \n (N/Mm2)','Mod %'])
elif(self.unit_typex == "MPA"):
self.tableWidget.setHorizontalHeaderLabels(['Spe. No.', 'CS.Area \n (mm2)','Force at Peak \n (N)' ,'E@Peak \n (mm)','% E@Peak','E@Break \n (mm)','% E@Break','Tensile Strength \n (MPA)','Mod@100% \n (MPA)','Mod@200% \n (MPA)','Mod@300% \n (MPA)','Mod %'])
else:
self.tableWidget.setHorizontalHeaderLabels(['Spe. No.', 'CS.Area \n (cm2)','Force at Peak \n (Kg)' ,'E@Peak \n (cm)','% E@Peak','E@Break \n (cm)','% E@Break','Tensile Strength \n (Kg/Cm2)','Mod@100% \n (Kg/Cm2)','Mod@200% \n (Kg/Cm2)','Mod@300% \n (Kg/Cm2)','Mod %'])
else:
self.tableWidget.setHorizontalHeaderLabels(['Spe. No.', 'Thickness \n (mm)', 'Width \n (mm)', 'CS.Area \n (mm2)','Force at Peak \n (kg)' ,'% E@Peak','E@Break \n (mm)','% E@Break','Tensile Strength \n (Kg/Cm2)','Mod@100% \n (Kg/Cm2)','Mod@200% \n (Kg/Cm2)','Mod@300% \n (Kg/Cm2)','Mod %'])
connection = sqlite3.connect("tyr.db")
results=connection.execute("SELECT MOD_AT_ANY FROM REPORT_MST WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR)")
for rows in results:
print(" self.lastIndex :"+str(self.lastIndex))
item = self.tableWidget.horizontalHeaderItem(self.lastIndex)
if(self.unit_typex=="Lb/Inch"):
item.setText("Mod@"+str(rows[0])+"% \n (Lb/Inch2)")
elif(self.unit_typex == "Newton/Mm"):
item.setText("Mod@"+str(rows[0])+"% \n (N/Mm2)")
elif(self.unit_typex == "MPA"):
item.setText("Mod@"+str(rows[0])+"% \n (MPA)")
else:
item.setText("Mod@"+str(rows[0])+"% \n (Kgf/Cm2)")
connection = sqlite3.connect("tyr.db")
print("shape : "+str(self.shape))
if (self.shape=="Rectangle"):
results=connection.execute("SELECT TYPE_STR as specimen_no,printf(\"%.2f\", THICKNESS),printf(\"%.2f\", WIDTH),printf(\"%.4f\", CS_AREA),printf(\"%.2f\", PEAK_LOAD),printf(\"%.2f\", E_PAEK_LOAD),printf(\"%.2f\", PREC_E_AT_PEAK),printf(\"%.2f\", E_BREAK_LOAD) ,printf(\"%.2f\", PREC_E_AT_BREAK) ,printf(\"%.2f\", TENSILE_STRENGTH) ,printf(\"%.2f\", MODULUS_100) ,printf(\"%.2f\", MODULUS_200),printf(\"%.2f\", MODULUS_300),printf(\"%.2f\", MOD_AT_ANY) FROM REPORT_PART_2_AGGR WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR)")
results1=connection.execute("SELECT ((A.REC_ID)-B.MIN_REC_ID)+1 AS SPECIMEN_NO,printf(\"%.2f\", A.THICKNESS),printf(\"%.2f\", A.WIDTH),printf(\"%.4f\", A.CS_AREA),printf(\"%.2f\", A.PEAK_LOAD),printf(\"%.2f\", A.E_PAEK_LOAD),printf(\"%.2f\", PREC_E_AT_PEAK),printf(\"%.2f\", E_BREAK_LOAD) ,printf(\"%.2f\", PREC_E_AT_BREAK) ,printf(\"%.2f\", TENSILE_STRENGTH) ,printf(\"%.2f\", MODULUS_100) ,printf(\"%.2f\", MODULUS_200),printf(\"%.2f\", MODULUS_300),printf(\"%.2f\", MOD_AT_ANY) FROM REPORT_PART_2 A, (SELECT MIN(REC_ID) AS MIN_REC_ID, REPORT_ID,round(TENSILE_STRENGTH,2),round(MODULUS_100,2),round(MODULUS_200,2),round(MODULUS_300,2),round(MOD_AT_ANY,2) FROM REPORT_PART_2 WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR) ) B WHERE A.REPORT_ID=B.REPORT_ID ")
elif (self.shape=="Cylindrical"):
results=connection.execute("SELECT TYPE_STR as specimen_no,printf(\"%.2f\", DIAMETER),printf(\"%.4f\", CS_AREA),printf(\"%.2f\", PEAK_LOAD),printf(\"%.2f\", E_PAEK_LOAD),printf(\"%.2f\", PREC_E_AT_PEAK),printf(\"%.2f\", BREAK_LOAD),printf(\"%.2f\", E_BREAK_LOAD),printf(\"%.2f\", PREC_E_AT_BREAK),printf(\"%.2f\", TENSILE_STRENGTH),printf(\"%.2f\", MODULUS_100),printf(\"%.2f\", MODULUS_200),printf(\"%.2f\", MODULUS_300),printf(\"%.2f\", MOD_AT_ANY) FROM REPORT_PART_2_AGGR WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR)")
results1=connection.execute("SELECT ((A.REC_ID)-B.MIN_REC_ID)+1 AS SPECIMEN_NO,printf(\"%.2f\", A.DIAMETER),printf(\"%.4f\", A.CS_AREA),printf(\"%.2f\", A.PEAK_LOAD),printf(\"%.2f\", A.E_PAEK_LOAD),printf(\"%.2f\", PREC_E_AT_PEAK),printf(\"%.2f\", BREAK_LOAD),printf(\"%.2f\", E_BREAK_LOAD),printf(\"%.2f\", PREC_E_AT_BREAK),printf(\"%.2f\", TENSILE_STRENGTH),printf(\"%.2f\", MODULUS_100),printf(\"%.2f\", MODULUS_200),printf(\"%.2f\", MODULUS_300),printf(\"%.2f\", MOD_AT_ANY) FROM REPORT_PART_2 A, (SELECT MIN(REC_ID) AS MIN_REC_ID, REPORT_ID FROM REPORT_PART_2 WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR) ) B WHERE A.REPORT_ID=B.REPORT_ID ")
elif (self.shape=="Pipe"):
results=connection.execute("SELECT TYPE_STR as specimen_no,printf(\"%.2f\", INN_DIAMETER),printf(\"%.2f\", OUT_DIAMTER),printf(\"%.4f\", CS_AREA),printf(\"%.2f\", PEAK_LOAD),printf(\"%.2f\", E_PAEK_LOAD),printf(\"%.2f\", PREC_E_AT_PEAK),printf(\"%.2f\", E_BREAK_LOAD),printf(\"%.2f\", PREC_E_AT_BREAK),printf(\"%.2f\", TENSILE_STRENGTH),printf(\"%.2f\", MODULUS_100),printf(\"%.2f\", MODULUS_200),printf(\"%.2f\", MODULUS_300),printf(\"%.2f\", MOD_AT_ANY) FROM REPORT_PART_2_AGGR WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR)")
results1=connection.execute("SELECT ((A.REC_ID)-B.MIN_REC_ID)+1 AS SPECIMEN_NO,printf(\"%.2f\", A.INN_DIAMETER),printf(\"%.2f\", A.OUT_DIAMTER),printf(\"%.4f\", A.CS_AREA),printf(\"%.2f\", A.PEAK_LOAD),printf(\"%.2f\", A.E_PAEK_LOAD),printf(\"%.2f\", PREC_E_AT_PEAK),printf(\"%.2f\", E_BREAK_LOAD),printf(\"%.2f\", PREC_E_AT_BREAK),printf(\"%.2f\", TENSILE_STRENGTH),printf(\"%.2f\", MODULUS_100),printf(\"%.2f\", MODULUS_200),printf(\"%.2f\", MODULUS_300),printf(\"%.2f\", MOD_AT_ANY) FROM REPORT_PART_2 A, (SELECT MIN(REC_ID) AS MIN_REC_ID, REPORT_ID FROM REPORT_PART_2 WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR) ) B WHERE A.REPORT_ID=B.REPORT_ID ")
elif (self.shape=="DirectValue"):
results=connection.execute("SELECT TYPE_STR as specimen_no,printf(\"%.4f\", CS_AREA),printf(\"%.2f\", PEAK_LOAD),printf(\"%.2f\", E_PAEK_LOAD),printf(\"%.2f\", PREC_E_AT_PEAK),printf(\"%.2f\", E_BREAK_LOAD),printf(\"%.2f\", PREC_E_AT_BREAK),printf(\"%.2f\", TENSILE_STRENGTH),printf(\"%.2f\", MODULUS_100),printf(\"%.2f\", MODULUS_200),printf(\"%.2f\", MODULUS_300),printf(\"%.2f\", MOD_AT_ANY) FROM REPORT_PART_2_AGGR WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR)")
results1=connection.execute("SELECT ((A.REC_ID)-B.MIN_REC_ID)+1 AS SPECIMEN_NO,printf(\"%.4f\", A.CS_AREA),printf(\"%.2f\", A.PEAK_LOAD),printf(\"%.2f\", A.E_PAEK_LOAD),printf(\"%.2f\", PREC_E_AT_PEAK),printf(\"%.2f\", E_BREAK_LOAD),printf(\"%.2f\", PREC_E_AT_BREAK),printf(\"%.2f\", TENSILE_STRENGTH),printf(\"%.2f\", MODULUS_100),printf(\"%.2f\", MODULUS_200),printf(\"%.2f\", MODULUS_300),printf(\"%.2f\", MOD_AT_ANY) FROM REPORT_PART_2 A, (SELECT MIN(REC_ID) AS MIN_REC_ID, REPORT_ID FROM REPORT_PART_2 WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR) ) B WHERE A.REPORT_ID=B.REPORT_ID ")
else:
print("NO Val")
results=connection.execute("SELECT TYPE_STR as specimen_no,printf(\"%.4f\", CS_AREA),printf(\"%.2f\", PEAK_LOAD),printf(\"%.2f\", E_PAEK_LOAD),printf(\"%.2f\", PREC_E_AT_PEAK),printf(\"%.2f\", E_BREAK_LOAD),printf(\"%.2f\", PREC_E_AT_BREAK),printf(\"%.2f\", TENSILE_STRENGTH),printf(\"%.2f\", MODULUS_100),printf(\"%.2f\", MODULUS_200),printf(\"%.2f\", MODULUS_300),printf(\"%.2f\", MOD_AT_ANY) FROM REPORT_PART_2_AGGR WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR)")
results1=connection.execute("SELECT ((A.REC_ID)-B.MIN_REC_ID)+1 AS SPECIMEN_NO,printf(\"%.4f\", A.CS_AREA),printf(\"%.2f\", A.PEAK_LOAD),printf(\"%.2f\", A.E_PAEK_LOAD),printf(\"%.2f\", PREC_E_AT_PEAK),printf(\"%.2f\", E_BREAK_LOAD),printf(\"%.2f\", PREC_E_AT_BREAK),printf(\"%.2f\", TENSILE_STRENGTH),printf(\"%.2f\", MODULUS_100),printf(\"%.2f\", MODULUS_200),printf(\"%.2f\", MODULUS_300),printf(\"%.2f\", MOD_AT_ANY) FROM REPORT_PART_2 A, (SELECT MIN(REC_ID) AS MIN_REC_ID, REPORT_ID FROM REPORT_PART_2 WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR) ) B WHERE A.REPORT_ID=B.REPORT_ID ")
for row_number, row_data in enumerate(results):
self.tableWidget.insertRow(row_number)
for column_number, data in enumerate(row_data):
self.tableWidget.setItem(row_number,column_number,QTableWidgetItem(str(data)))
for row_number, row_data in enumerate(results1):
self.tableWidget.insertRow(row_number)
for column_number, data in enumerate(row_data):
self.tableWidget.setItem(row_number,column_number,QTableWidgetItem(str(data)))
#self.tableWidget.resizeColumnsToContents()
#self.tableWidget.resizeRowsToContents()
self.tableWidget.setEditTriggers(QtWidgets.QTableWidget.NoEditTriggers)
connection.close()
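# guage_select_all_rows is the variant used when DEF_FLG = 'Y': the layout
# adds a Yield Strength column (def_yeild_strg in the queries below) and, for
# Rectangle specimens, collapses the elongation columns into a single %E value.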
def guage_select_all_rows(self):
self.delete_all_records()
self.tableWidget.setMidLineWidth(-4)
self.tableWidget.setGridStyle(QtCore.Qt.SolidLine)
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(8)
font = QtGui.QFont()
font.setPointSize(10)
self.tableWidget.setFont(font)
self.tableWidget.horizontalHeader().setStyleSheet("QHeaderView { font-size: 10pt};")
self.tableWidget.verticalHeader().setStyleSheet("QHeaderView { font-size: 10pt};")
#self.tableWidget.horizontalHeader().setStyleSheet("::section {background-color : lightGray;font-size:10pt;}")
#self.tableWidget.setRowCount(1)
#self.tableWidget.resizeColumnsToContents()
#self.tableWidget.resizeRowsToContents()
self.tableWidget.setEditTriggers(QtWidgets.QTableWidget.NoEditTriggers)
self.tableWidget.horizontalHeader().setStretchLastSection(True)
self.tableWidget.setColumnWidth(0, 150)
self.tableWidget.setColumnWidth(1, 150)
self.tableWidget.setColumnWidth(2, 150)
self.tableWidget.setColumnWidth(3, 150)
self.tableWidget.setColumnWidth(4, 150)
self.tableWidget.setColumnWidth(5, 150)
self.tableWidget.setColumnWidth(6, 150)
self.tableWidget.setColumnWidth(7, 150)
connection = sqlite3.connect("tyr.db")
results=connection.execute("SELECT STG_GRAPH_TYPE,STG_UNIT_TYPE FROM GLOBAL_REPORTS_PARAM")
for x in results:
self.unit_typex=x[1]
connection.close()
self.tableWidget.horizontalHeader().setStretchLastSection(True)
# SELECT SHAPE FROM SPECIMEN_MST WHERE SPECIMEN_NAME IN ( SELECT SPECIMEN_NAME FROM TEST_MST WHERE TEST_ID IN (SELECT NEW_REPORT_TEST_ID FROM GLOBAL_VAR))
connection = sqlite3.connect("tyr.db")
results=connection.execute("SELECT SHAPE FROM SPECIMEN_MST WHERE SPECIMEN_NAME IN ( SELECT SPECIMEN_NAME FROM TEST_MST WHERE TEST_ID IN (SELECT NEW_REPORT_TEST_ID FROM GLOBAL_VAR))")
for x in results:
self.shape=x[0]
connection.close()
#self.shape='Pipe'
print ("shape :"+self.shape)
if (self.shape=="Rectangle"):
if(self.unit_typex=="Lb/Inch"):
self.tableWidget.setHorizontalHeaderLabels(['Spe.No.', ' Thickness \n (Inch) ', ' Width \n (Inch) ', 'CS.Area \n (Inch2)','Force at Peak \n (Lb)' ,'% E@Break','Tensile Strength \n (Lb/Inch2)','Yield Strength \n (Lb/Inch2)' ])
elif(self.unit_typex == "Newton/Mm"):
self.tableWidget.setHorizontalHeaderLabels(['Spe.No.', ' Thickness \n (mm) ', ' Width \n (mm) ', 'CS.Area \n (mm2)','Force at Peak \n (N)' ,'% E','Tensile Strength \n (N/Mm2)','Yield Strength \n (N/Mm2)'])
elif(self.unit_typex == "MPA"):
self.tableWidget.setHorizontalHeaderLabels(['Spe.No.', ' Thickness \n (mm) ', ' Width \n (mm) ', 'CS.Area \n (mm2)','Force at Peak \n (N)' ,'% E ','Tensile Strength \n (MPA)','Yield Strength \n (MPA)'])
else:
self.tableWidget.setHorizontalHeaderLabels(['Spe.No.', ' Thickness \n (cm) ', ' Width \n (cm) ', 'CS.Area \n (cm2)','Force at Peak \n (Kgf)' ,'% E','Tensile Strength \n (Kgf/Cm2)','Yield Strength \n (Kgf/Cm2)'])
elif (self.shape=="Cylindrical"):
self.tableWidget.setColumnCount(13)
self.lastIndex=12
if(self.unit_typex=="Lb/Inch"):
self.tableWidget.setHorizontalHeaderLabels(['Spe. No.', 'Diameter \n (Inch)', 'CS.Area \n (Inch2)','Force at Peak \n (Lb)' ,'E@Peak \n (Inch)','% E@Peak','E@Break \n (Inch)','% E@Break','Tensile Strength \n (Lb/Inch2)','Yield Strength \n (Lb/Inch2)','Mod@100% \n (Lb/Inch2)','Mod@200% \n (Lb/Inch2)','Mod@300% \n (Lb/Inch2)','Mod % (Lb/Inch2)'])
elif(self.unit_typex == "Newton/Mm"):
self.tableWidget.setHorizontalHeaderLabels(['Spe. No.', 'Diameter \n (mm)', 'CS.Area \n (mm2)','Force at Peak \n (N)' ,'E@Peak \n (mm)','% E@Peak','E@Break \n (mm)','% E@Break','Tensile Strength \n (N/Mm2)','Yield Strength \n (N/Mm2)','Mod@100% \n (N/Mm2)','Mod@200% \n (N/Mm2)','Mod@300% \n (N/Mm2)','Mod %'])
elif(self.unit_typex == "MPA"):
self.tableWidget.setHorizontalHeaderLabels(['Spe. No.', 'Diameter \n (mm)', 'CS.Area \n (mm2)','Force at Peak \n (N)' ,'E@Peak \n (mm)','% E@Peak','E@Break \n (mm)','% E@Break','Tensile Strength \n (MPA)','Yield Strength \n (MPA)','Mod@100% \n (MPA)','Mod@200% \n (MPA)','Mod@300% \n (MPA)','Mod %'])
else:
self.tableWidget.setHorizontalHeaderLabels(['Spe. No.', 'Diameter \n (cm)', 'CS.Area \n (cm2)','Force at Peak \n (Kg)' ,'E@Peak \n (cm)','% E@Peak','E@Break \n (cm)','% E@Break','Tensile Strength \n (Kg/Cm2)','Yield Strength \n (Kgf/Cm2)','Mod@100% \n (Kg/Cm2)','Mod@200% \n (Kg/Cm2)','Mod@300% \n (Kg/Cm2)','Mod %'])
elif (self.shape=="Pipe"):
self.tableWidget.setColumnCount(14)
self.lastIndex=13
if(self.unit_typex=="Lb/Inch"):
self.tableWidget.setHorizontalHeaderLabels(['Spe. No.', 'Inn.Diameter \n (Inch)', 'Out. Diameter \n (Inch)', 'CS.Area \n (Inch2)','Force at Peak \n (Lb)' ,'E@Peak \n (Inch)','% E@Peak','E@Break \n (Inch)','% E@Break','Tensile Strength \n (Lb/Inch2)','Yield Strength \n (Lb/Inch2)','Mod@100% \n (Lb/Inch2)','Mod@200% \n (Lb/Inch2)','Mod@300% \n (Lb/Inch2)','Mod % (Lb/Inch2)'])
elif(self.unit_typex == "Newton/Mm"):
self.tableWidget.setHorizontalHeaderLabels(['Spe. No.', 'Inn.Diameter \n (mm)', 'Out. Diameter \n (mm)', 'CS.Area \n (mm2)','Force at Peak \n (N)' ,'E@Peak \n (mm)','% E@Peak','E@Break \n (mm)','% E@Break','Tensile Strength \n (N/Mm2)','Yield Strength \n (N/Mm2)','Mod@100% \n (N/Mm2)','Mod@200% \n (N/Mm2)','Mod@300% \n (N/Mm2)','Mod %'])
elif(self.unit_typex == "MPA"):
self.tableWidget.setHorizontalHeaderLabels(['Spe. No.', 'Inn.Diameter \n (mm)', 'Out. Diameter \n (mm)', 'CS.Area \n (mm2)','Force at Peak \n (N)' ,'E@Peak \n (mm)','% E@Peak','E@Break \n (mm)','% E@Break','Tensile Strength \n (MPA)','Yield Strength \n (MPA)','Mod@100% \n (MPA)','Mod@200% \n (MPA)','Mod@300% \n (MPA)','Mod %'])
else:
self.tableWidget.setHorizontalHeaderLabels(['Spe. No.', 'Inn.Diameter \n (cm)', 'Out. Diameter \n (cm)', 'CS.Area \n (cm2)','Force at Peak \n (Kgf)' ,'E@Peak \n (cm)','% E@Peak','E@Break \n (cm)','% E@Break','Tensile Strength \n (Kgf/Cm2)','Yield Strength \n (Kgf/Cm2)','Mod@100% \n (Kgf/Cm2)','Mod@200% \n (Kgf/Cm2)','Mod@300% \n (Kgf/Cm2)','Mod %'])
elif (self.shape=="DirectValue"):
self.tableWidget.setColumnCount(12)
self.lastIndex=11
if(self.unit_typex=="Lb/Inch"):
#print("header")
self.tableWidget.setHorizontalHeaderLabels(['Spe. No.', 'CS.Area \n (Inch2)','Force at Peak \n (Lb)' ,'E@Peak \n (Inch)','% E@Peak','E@Break \n (Inch)','% E@Break','Tensile Strength \n (Lb/Inch2)','Yield Strength \n (Lb/Inch2)','Mod@100% \n (Lb/Inch2)','Mod@200% \n (Lb/Inch2)','Mod@300% \n (Lb/Inch2)','Mod %'])
elif(self.unit_typex == "Newton/Mm"):
self.tableWidget.setHorizontalHeaderLabels(['Spe. No.', 'CS.Area \n (mm2)','Force at Peak \n (N)' ,'E@Peak \n (mm)','% E@Peak','E@Break \n (mm)','% E@Break','Tensile Strength \n (N/Mm2)','Yield Strength \n (N/Mm2)','Mod@100% \n (N/Mm2)','Mod@200% \n (N/Mm2)','Mod@300% \n (N/Mm2)','Mod %'])
elif(self.unit_typex == "MPA"):
self.tableWidget.setHorizontalHeaderLabels(['Spe. No.', 'CS.Area \n (mm2)','Force at Peak \n (N)' ,'E@Peak \n (mm)','% E@Peak','E@Break \n (mm)','% E@Break','Tensile Strength \n (MPA)','Yield Strength \n (MPA)','Mod@100% \n (MPA)','Mod@200% \n (MPA)','Mod@300% \n (MPA)','Mod %'])
else:
self.tableWidget.setHorizontalHeaderLabels(['Spe. No.', 'CS.Area \n (cm2)','Force at Peak \n (Kg)' ,'E@Peak \n (cm)','% E@Peak','E@Break \n (cm)','% E@Break','Tensile Strength \n (Kg/Cm2)','Yield Strength \n (Kg/Cm2)','Mod@100% \n (Kg/Cm2)','Mod@200% \n (Kg/Cm2)','Mod@300% \n (Kg/Cm2)','Mod %'])
else:
self.tableWidget.setHorizontalHeaderLabels(['Spe. No.', 'Thickness \n (mm)', 'Width \n (mm)', 'CS.Area \n (mm2)','Force at Peak \n (kg)' ,'% E@Peak','E@Break \n (mm)','% E@Break','Tensile Strength \n (Kg/Cm2)','Yield Strength \n (Kgf/Cm2)','Mod@100% \n (Kg/Cm2)','Mod@200% \n (Kg/Cm2)','Mod@300% \n (Kg/Cm2)','Mod %'])
'''
connection = sqlite3.connect("tyr.db")
results=connection.execute("SELECT MOD_AT_ANY FROM REPORT_MST WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR)")
for rows in results:
print(" self.lastIndex :"+str(self.lastIndex))
item = self.tableWidget.horizontalHeaderItem(self.lastIndex)
if(self.unit_typex=="Lb/Inch"):
item.setText("Mod@"+str(rows[0])+"% \n (Lb/Inch2)")
elif(self.unit_typex == "Newton/Mm"):
item.setText("Mod@"+str(rows[0])+"% \n (N/Mm2)")
elif(self.unit_typex == "MPA"):
item.setText("Mod@"+str(rows[0])+"% \n (MPA)")
else:
item.setText("Mod@"+str(rows[0])+"% \n (Kgf/Cm2)")
'''
connection = sqlite3.connect("tyr.db")
print("shape : "+str(self.shape))
if (self.shape=="Rectangle"):
results=connection.execute("SELECT TYPE_STR as specimen_no,printf(\"%.2f\", THICKNESS),printf(\"%.2f\", WIDTH),printf(\"%.4f\", CS_AREA),printf(\"%.2f\", PEAK_LOAD),printf(\"%.2f\", PREC_E_AT_BREAK) ,printf(\"%.2f\", TENSILE_STRENGTH) ,printf(\"%.2f\", def_yeild_strg) FROM REPORT_PART_2_AGGR WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR)")
results1=connection.execute("SELECT ((A.REC_ID)-B.MIN_REC_ID)+1 AS SPECIMEN_NO,printf(\"%.2f\", A.THICKNESS),printf(\"%.2f\", A.WIDTH),printf(\"%.4f\", A.CS_AREA),printf(\"%.2f\", A.PEAK_LOAD),printf(\"%.2f\", PREC_E_AT_BREAK) ,printf(\"%.2f\", TENSILE_STRENGTH) ,printf(\"%.2f\", def_yeild_strg) FROM REPORT_PART_2 A, (SELECT MIN(REC_ID) AS MIN_REC_ID, REPORT_ID,round(TENSILE_STRENGTH,2),round(MODULUS_100,2),round(MODULUS_200,2),round(MODULUS_300,2),round(MOD_AT_ANY,2) FROM REPORT_PART_2 WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR) ) B WHERE A.REPORT_ID=B.REPORT_ID ")
elif (self.shape=="Cylindrical"):
results=connection.execute("SELECT TYPE_STR as specimen_no,printf(\"%.2f\", DIAMETER),printf(\"%.4f\", CS_AREA),printf(\"%.2f\", PEAK_LOAD),printf(\"%.2f\", E_PAEK_LOAD),printf(\"%.2f\", PREC_E_AT_PEAK),printf(\"%.2f\", BREAK_LOAD),printf(\"%.2f\", E_BREAK_LOAD),printf(\"%.2f\", PREC_E_AT_BREAK),printf(\"%.2f\", TENSILE_STRENGTH),printf(\"%.2f\", def_yeild_strg),printf(\"%.2f\", MODULUS_100),printf(\"%.2f\", MODULUS_200),printf(\"%.2f\", MODULUS_300),printf(\"%.2f\", MOD_AT_ANY) FROM REPORT_PART_2_AGGR WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR)")
results1=connection.execute("SELECT ((A.REC_ID)-B.MIN_REC_ID)+1 AS SPECIMEN_NO,printf(\"%.2f\", A.DIAMETER),printf(\"%.4f\", A.CS_AREA),printf(\"%.2f\", A.PEAK_LOAD),printf(\"%.2f\", A.E_PAEK_LOAD),printf(\"%.2f\", PREC_E_AT_PEAK),printf(\"%.2f\", BREAK_LOAD),printf(\"%.2f\", E_BREAK_LOAD),printf(\"%.2f\", PREC_E_AT_BREAK),printf(\"%.2f\", TENSILE_STRENGTH),printf(\"%.2f\", def_yeild_strg),printf(\"%.2f\", MODULUS_100),printf(\"%.2f\", MODULUS_200),printf(\"%.2f\", MODULUS_300),printf(\"%.2f\", MOD_AT_ANY) FROM REPORT_PART_2 A, (SELECT MIN(REC_ID) AS MIN_REC_ID, REPORT_ID FROM REPORT_PART_2 WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR) ) B WHERE A.REPORT_ID=B.REPORT_ID ")
elif (self.shape=="Pipe"):
results=connection.execute("SELECT TYPE_STR as specimen_no,printf(\"%.2f\", INN_DIAMETER),printf(\"%.2f\", OUT_DIAMTER),printf(\"%.4f\", CS_AREA),printf(\"%.2f\", PEAK_LOAD),printf(\"%.2f\", E_PAEK_LOAD),printf(\"%.2f\", PREC_E_AT_PEAK),printf(\"%.2f\", E_BREAK_LOAD),printf(\"%.2f\", PREC_E_AT_BREAK),printf(\"%.2f\", TENSILE_STRENGTH),printf(\"%.2f\", def_yeild_strg),printf(\"%.2f\", MODULUS_100),printf(\"%.2f\", MODULUS_200),printf(\"%.2f\", MODULUS_300),printf(\"%.2f\", MOD_AT_ANY) FROM REPORT_PART_2_AGGR WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR)")
results1=connection.execute("SELECT ((A.REC_ID)-B.MIN_REC_ID)+1 AS SPECIMEN_NO,printf(\"%.2f\", A.INN_DIAMETER),printf(\"%.2f\", A.OUT_DIAMTER),printf(\"%.4f\", A.CS_AREA),printf(\"%.2f\", A.PEAK_LOAD),printf(\"%.2f\", A.E_PAEK_LOAD),printf(\"%.2f\", PREC_E_AT_PEAK),printf(\"%.2f\", E_BREAK_LOAD),printf(\"%.2f\", PREC_E_AT_BREAK),printf(\"%.2f\", TENSILE_STRENGTH),printf(\"%.2f\", def_yeild_strg),printf(\"%.2f\", MODULUS_100),printf(\"%.2f\", MODULUS_200),printf(\"%.2f\", MODULUS_300),printf(\"%.2f\", MOD_AT_ANY) FROM REPORT_PART_2 A, (SELECT MIN(REC_ID) AS MIN_REC_ID, REPORT_ID FROM REPORT_PART_2 WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR) ) B WHERE A.REPORT_ID=B.REPORT_ID ")
elif (self.shape=="DirectValue"):
results=connection.execute("SELECT TYPE_STR as specimen_no,printf(\"%.4f\", CS_AREA),printf(\"%.2f\", PEAK_LOAD),printf(\"%.2f\", E_PAEK_LOAD),printf(\"%.2f\", PREC_E_AT_PEAK),printf(\"%.2f\", E_BREAK_LOAD),printf(\"%.2f\", PREC_E_AT_BREAK),printf(\"%.2f\", TENSILE_STRENGTH),printf(\"%.2f\", def_yeild_strg),printf(\"%.2f\", MODULUS_100),printf(\"%.2f\", MODULUS_200),printf(\"%.2f\", MODULUS_300),printf(\"%.2f\", MOD_AT_ANY) FROM REPORT_PART_2_AGGR WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR)")
results1=connection.execute("SELECT ((A.REC_ID)-B.MIN_REC_ID)+1 AS SPECIMEN_NO,printf(\"%.4f\", A.CS_AREA),printf(\"%.2f\", A.PEAK_LOAD),printf(\"%.2f\", A.E_PAEK_LOAD),printf(\"%.2f\", PREC_E_AT_PEAK),printf(\"%.2f\", E_BREAK_LOAD),printf(\"%.2f\", PREC_E_AT_BREAK),printf(\"%.2f\", TENSILE_STRENGTH),printf(\"%.2f\", def_yeild_strg),printf(\"%.2f\", MODULUS_100),printf(\"%.2f\", MODULUS_200),printf(\"%.2f\", MODULUS_300),printf(\"%.2f\", MOD_AT_ANY) FROM REPORT_PART_2 A, (SELECT MIN(REC_ID) AS MIN_REC_ID, REPORT_ID FROM REPORT_PART_2 WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR) ) B WHERE A.REPORT_ID=B.REPORT_ID ")
else:
print("NO Val")
results=connection.execute("SELECT TYPE_STR as specimen_no,printf(\"%.4f\", CS_AREA),printf(\"%.2f\", PEAK_LOAD),printf(\"%.2f\", E_PAEK_LOAD),printf(\"%.2f\", PREC_E_AT_PEAK),printf(\"%.2f\", E_BREAK_LOAD),printf(\"%.2f\", PREC_E_AT_BREAK),printf(\"%.2f\", TENSILE_STRENGTH),printf(\"%.2f\", def_yeild_strg),printf(\"%.2f\", MODULUS_100),printf(\"%.2f\", MODULUS_200),printf(\"%.2f\", MODULUS_300),printf(\"%.2f\", MOD_AT_ANY) FROM REPORT_PART_2_AGGR WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR)")
results1=connection.execute("SELECT ((A.REC_ID)-B.MIN_REC_ID)+1 AS SPECIMEN_NO,printf(\"%.4f\", A.CS_AREA),printf(\"%.2f\", A.PEAK_LOAD),printf(\"%.2f\", A.E_PAEK_LOAD),printf(\"%.2f\", PREC_E_AT_PEAK),printf(\"%.2f\", E_BREAK_LOAD),printf(\"%.2f\", PREC_E_AT_BREAK),printf(\"%.2f\", TENSILE_STRENGTH),printf(\"%.2f\", def_yeild_strg),printf(\"%.2f\", MODULUS_100),printf(\"%.2f\", MODULUS_200),printf(\"%.2f\", MODULUS_300),printf(\"%.2f\", MOD_AT_ANY) FROM REPORT_PART_2 A, (SELECT MIN(REC_ID) AS MIN_REC_ID, REPORT_ID FROM REPORT_PART_2 WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR) ) B WHERE A.REPORT_ID=B.REPORT_ID ")
for row_number, row_data in enumerate(results):
self.tableWidget.insertRow(row_number)
for column_number, data in enumerate(row_data):
self.tableWidget.setItem(row_number,column_number,QTableWidgetItem(str(data)))
for row_number, row_data in enumerate(results1):
self.tableWidget.insertRow(row_number)
for column_number, data in enumerate(row_data):
self.tableWidget.setItem(row_number,column_number,QTableWidgetItem(str(data)))
#self.tableWidget.resizeColumnsToContents()
#self.tableWidget.resizeRowsToContents()
self.tableWidget.setEditTriggers(QtWidgets.QTableWidget.NoEditTriggers)
connection.close()
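# select_all_rows_cof builds the coefficient-of-friction report: three summary
# rows (Min / Max / Avg over CYCLES_MST) are inserted first, followed by one
# row per recorded test cycle.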
def select_all_rows_cof(self):
self.delete_all_records()
self.tableWidget.setMidLineWidth(-4)
self.tableWidget.setGridStyle(QtCore.Qt.SolidLine)
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(6)
font = QtGui.QFont()
font.setPointSize(10)
self.tableWidget.setFont(font)
self.tableWidget.horizontalHeader().setStyleSheet("QHeaderView { font-size: 10pt};")
self.tableWidget.verticalHeader().setStyleSheet("QHeaderView { font-size: 10pt};")
self.tableWidget.setEditTriggers(QtWidgets.QTableWidget.NoEditTriggers)
self.tableWidget.setColumnCount(6)
self.tableWidget.horizontalHeader().setStretchLastSection(True)
        self.tableWidget.setHorizontalHeaderLabels(['No.','MAX FORCE (init) \n (gm)','AVG FORCE \n (gm)','STATIC COF',' KINETIC COF ','SLEDGE MASS \n (gm)'])
self.tableWidget.setColumnWidth(0, 170)
self.tableWidget.setColumnWidth(1, 150)
self.tableWidget.setColumnWidth(2, 150)
self.tableWidget.setColumnWidth(3, 150)
self.tableWidget.setColumnWidth(4, 150)
self.tableWidget.setColumnWidth(5, 150)
connection = sqlite3.connect("tyr.db")
#results1=connection.execute("SELECT TYPE_STR,printf(\"%.4f\", CS_AREA),printf(\"%.2f\", PEAK_LOAD),printf(\"%.2f\", E_PAEK_LOAD),printf(\"%.2f\", COMPRESSIVE_STRENGTH),printf(\"%.2f\", PREC_E_AT_BREAK) FROM REPORT_II_AGGR WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR)")
results1=connection.execute("SELECT 'Min',printf(\"%.2f\", Min(MAX_FORCE)) ,printf(\"%.2f\", Min(AVG_FORCE)),printf(\"%.2f\", Min(STATIC_COF)),printf(\"%.2f\", Min(KINETIC_COF)), printf(\"%.2f\", Min(SLEDE_WT_GM)) FROM CYCLES_MST WHERE TEST_ID IN (SELECT NEW_REPORT_TEST_ID FROM GLOBAL_VAR) order by cycle_id Asc")
for row_number, row_data in enumerate(results1):
self.tableWidget.insertRow(row_number)
for column_number, data in enumerate(row_data):
self.tableWidget.setItem(row_number,column_number,QTableWidgetItem(str(data)))
connection.close()
connection = sqlite3.connect("tyr.db")
#results1=connection.execute("SELECT TYPE_STR,printf(\"%.4f\", CS_AREA),printf(\"%.2f\", PEAK_LOAD),printf(\"%.2f\", E_PAEK_LOAD),printf(\"%.2f\", COMPRESSIVE_STRENGTH),printf(\"%.2f\", PREC_E_AT_BREAK) FROM REPORT_II_AGGR WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR)")
results1=connection.execute("SELECT 'Max',printf(\"%.2f\", max(MAX_FORCE)) ,printf(\"%.2f\", max(AVG_FORCE)),printf(\"%.2f\", max(STATIC_COF)),printf(\"%.2f\", max(KINETIC_COF)), printf(\"%.2f\", max(SLEDE_WT_GM)) FROM CYCLES_MST WHERE TEST_ID IN (SELECT NEW_REPORT_TEST_ID FROM GLOBAL_VAR) order by cycle_id Asc")
for row_number, row_data in enumerate(results1):
self.tableWidget.insertRow(row_number)
for column_number, data in enumerate(row_data):
self.tableWidget.setItem(row_number,column_number,QTableWidgetItem(str(data)))
connection.close()
connection = sqlite3.connect("tyr.db")
#results1=connection.execute("SELECT TYPE_STR,printf(\"%.4f\", CS_AREA),printf(\"%.2f\", PEAK_LOAD),printf(\"%.2f\", E_PAEK_LOAD),printf(\"%.2f\", COMPRESSIVE_STRENGTH),printf(\"%.2f\", PREC_E_AT_BREAK) FROM REPORT_II_AGGR WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR)")
results1=connection.execute("SELECT 'Avg',printf(\"%.2f\", avg(MAX_FORCE)) ,printf(\"%.2f\", avg(AVG_FORCE)),printf(\"%.2f\", avg(STATIC_COF)),printf(\"%.2f\", avg(KINETIC_COF)), printf(\"%.2f\", avg(SLEDE_WT_GM)) FROM CYCLES_MST WHERE TEST_ID IN (SELECT NEW_REPORT_TEST_ID FROM GLOBAL_VAR) order by cycle_id Asc")
for row_number, row_data in enumerate(results1):
self.tableWidget.insertRow(row_number)
for column_number, data in enumerate(row_data):
self.tableWidget.setItem(row_number,column_number,QTableWidgetItem(str(data)))
connection.close()
connection = sqlite3.connect("tyr.db")
#results1=connection.execute("SELECT TYPE_STR,printf(\"%.4f\", CS_AREA),printf(\"%.2f\", PEAK_LOAD),printf(\"%.2f\", E_PAEK_LOAD),printf(\"%.2f\", COMPRESSIVE_STRENGTH),printf(\"%.2f\", PREC_E_AT_BREAK) FROM REPORT_II_AGGR WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR)")
results1=connection.execute("SELECT ((A.CYCLE_ID)-C.MIN_CYCLE_ID)+1 AS SPECIMEN_NO,printf(\"%.2f\", A.MAX_FORCE) ,printf(\"%.2f\", A.AVG_FORCE),printf(\"%.2f\", A.STATIC_COF),printf(\"%.2f\", A.KINETIC_COF), printf(\"%.2f\", A.SLEDE_WT_GM) FROM CYCLES_MST A , (SELECT min(CYCLE_ID) as MIN_CYCLE_ID,TEST_ID FROM CYCLES_MST WHERE TEST_ID in (SELECT NEW_REPORT_TEST_ID FROM GLOBAL_VAR)) C WHERE A.TEST_ID=C.TEST_ID AND A.TEST_ID IN (SELECT NEW_REPORT_TEST_ID FROM GLOBAL_VAR) order by cycle_id Asc")
for row_number, row_data in enumerate(results1):
self.tableWidget.insertRow(row_number)
for column_number, data in enumerate(row_data):
self.tableWidget.setItem(row_number,column_number,QTableWidgetItem(str(data)))
connection.close()
self.tableWidget.setEditTriggers(QtWidgets.QTableWidget.NoEditTriggers)
def select_all_rows_compress(self):
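        # Populate the compression-test table; column-header units follow the unit
        # system configured in GLOBAL_REPORTS_PARAM (STG_UNIT_TYPE). Aggregate rows
        # from REPORT_II_AGGR come first, then per-specimen rows from REPORT_MST_II.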
self.delete_all_records()
self.tableWidget.setMidLineWidth(-4)
self.tableWidget.setGridStyle(QtCore.Qt.SolidLine)
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(6)
font = QtGui.QFont()
font.setPointSize(10)
self.tableWidget.setFont(font)
self.tableWidget.horizontalHeader().setStyleSheet("QHeaderView { font-size: 10pt};")
self.tableWidget.verticalHeader().setStyleSheet("QHeaderView { font-size: 10pt};")
self.tableWidget.setEditTriggers(QtWidgets.QTableWidget.NoEditTriggers)
self.tableWidget.horizontalHeader().setStretchLastSection(True)
self.tableWidget.setColumnWidth(0, 100)
self.tableWidget.setColumnWidth(1, 100)
self.tableWidget.setColumnWidth(2, 100)
self.tableWidget.setColumnWidth(3, 180)
self.tableWidget.setColumnWidth(4, 280)
self.tableWidget.setColumnWidth(5, 50)
connection = sqlite3.connect("tyr.db")
results=connection.execute("SELECT STG_GRAPH_TYPE,STG_UNIT_TYPE FROM GLOBAL_REPORTS_PARAM")
for x in results:
self.unit_typex=x[1]
connection.close()
self.tableWidget.horizontalHeader().setStretchLastSection(True)
if(self.unit_typex == "Kg/Cm"):
self.tableWidget.setHorizontalHeaderLabels(['Spec. \n No', 'CS Area \n (cm2)', 'Force at Peak\n (Kgf)', 'Compression \n (cm)', 'Compressive Strength \n (Kgf/Cm2)','% Compression \n'])
elif(self.unit_typex == "Lb/Inch"):
self.tableWidget.setHorizontalHeaderLabels(['Spec. \n No', 'CS Area \n (Inch2)', 'Force at Peak\n (Lb)', 'Compression \n (Inch)', 'Compressive Strength \n (Lb/Inch2)','% Compression \n'])
elif(self.unit_typex == "Newton/Mm"):
self.tableWidget.setHorizontalHeaderLabels(['Spec. \n No', 'CS Area \n (mm2)', 'Force at Peak\n (N)', 'Compression \n (mm)', 'Compressive Strength \n (N/mm2)','% Compression \n'])
elif(self.unit_typex == "MPA"):
self.tableWidget.setHorizontalHeaderLabels(['Spec. \n No', 'CS Area \n (mm2)', 'Force at Peak\n (N)', 'Compression \n (mm)', 'Compressive Strength \n (MPA)','% Compression \n'])
else:
self.tableWidget.setHorizontalHeaderLabels(['Spec. \n No', 'CS Area \n (mm2)', 'Force at Peak\n (MPA)', 'Compression \n (mm)', 'Compressive Strength \n (MPA)','% Compression \n'])
connection = sqlite3.connect("tyr.db")
results1=connection.execute("SELECT TYPE_STR,printf(\"%.4f\", CS_AREA),printf(\"%.2f\", PEAK_LOAD),printf(\"%.2f\", E_PAEK_LOAD),printf(\"%.2f\", COMPRESSIVE_STRENGTH),printf(\"%.2f\", PREC_E_AT_BREAK) FROM REPORT_II_AGGR WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR)")
#results=connection.execute("SELECT ((A.REC_ID)-B.MIN_REC_ID)+1 AS SPECIMEN_NO,A.THICKNESS,A.WIDTH,A.CS_AREA,A.PEAK_LOAD,A.E_PAEK_LOAD,A.PERCENTG_E_PEAK_LOAD_MM,A.PERCENTG_E_PEAK_LOAD FROM REPORT_MST_II A, (SELECT MIN(REC_ID) AS MIN_REC_ID, REPORT_ID FROM REPORT_MST_II WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR) ) B WHERE A.REPORT_ID=B.REPORT_ID ")
for row_number, row_data in enumerate(results1):
self.tableWidget.insertRow(row_number)
for column_number, data in enumerate(row_data):
self.tableWidget.setItem(row_number,column_number,QTableWidgetItem(str(data)))
connection.close()
#self.tableWidget.resizeColumnsToContents()
#self.tableWidget.resizeRowsToContents()
connection = sqlite3.connect("tyr.db")
results=connection.execute("SELECT ((A.REC_ID)-B.MIN_REC_ID)+1 AS SPECIMEN_NO,printf(\"%.4f\", A.CS_AREA),printf(\"%.2f\", A.PEAK_LOAD),printf(\"%.2f\", A.E_PAEK_LOAD),printf(\"%.2f\", A.COMPRESSIVE_STRENGTH),printf(\"%.2f\", A.PREC_E_AT_BREAK) FROM REPORT_MST_II A, (SELECT MIN(REC_ID) AS MIN_REC_ID, REPORT_ID FROM REPORT_MST_II WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR) ) B WHERE A.REPORT_ID=B.REPORT_ID")
for row_number, row_data in enumerate(results):
self.tableWidget.insertRow(row_number)
for column_number, data in enumerate(row_data):
self.tableWidget.setItem(row_number,column_number,QTableWidgetItem(str(data)))
connection.close()
self.tableWidget.setEditTriggers(QtWidgets.QTableWidget.NoEditTriggers)
def select_all_rows_tear(self):
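        # Populate the tear-strength table: aggregate rows from REPORT_II_AGGR,
        # followed by one row per specimen from REPORT_MST_II.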
self.delete_all_records()
self.tableWidget.setMidLineWidth(-4)
self.tableWidget.setGridStyle(QtCore.Qt.SolidLine)
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(4)
font = QtGui.QFont()
font.setPointSize(10)
self.tableWidget.setFont(font)
self.tableWidget.horizontalHeader().setStyleSheet("QHeaderView { font-size: 10pt};")
self.tableWidget.verticalHeader().setStyleSheet("QHeaderView { font-size: 10pt};")
self.tableWidget.setEditTriggers(QtWidgets.QTableWidget.NoEditTriggers)
self.tableWidget.horizontalHeader().setStretchLastSection(True)
self.tableWidget.setColumnWidth(0, 100)
self.tableWidget.setColumnWidth(1, 100)
self.tableWidget.setColumnWidth(2, 100)
self.tableWidget.setColumnWidth(3, 120)
connection = sqlite3.connect("tyr.db")
results=connection.execute("SELECT STG_GRAPH_TYPE,STG_UNIT_TYPE FROM GLOBAL_REPORTS_PARAM")
for x in results:
self.unit_typex=x[1]
connection.close()
self.tableWidget.horizontalHeader().setStretchLastSection(True)
if(self.unit_typex == "Kg/Cm"):
self.tableWidget.setHorizontalHeaderLabels(['Spec. \n No', 'Thickness \n (cm)', 'Force at Peak\n (Kgf)', 'Tear Strength \n (Kgf/Cm)'])
elif(self.unit_typex == "Lb/Inch"):
self.tableWidget.setHorizontalHeaderLabels(['Spec. \n No', 'Thickness \n (Inch)', 'Force at Peak\n (Lb)', 'Tear Strength \n (Lb/Inch)'])
elif(self.unit_typex == "Newton/Mm"):
self.tableWidget.setHorizontalHeaderLabels(['Spec. \n No', 'Thickness\n (mm)', 'Force at Peak\n (N)', 'Tear Strength \n (N/mm)'])
elif(self.unit_typex == "MPA"):
self.tableWidget.setHorizontalHeaderLabels(['Spec. \n No', 'Thickness\n (mm)', 'Force at Peak\n (N)', 'Tear Strength \n (MPA)'])
else:
self.tableWidget.setHorizontalHeaderLabels(['Spec. \n No', 'Thickness \n (mm)', 'Force at Peak\n (MPA)', 'Tear Strength \n (MPA)'])
connection = sqlite3.connect("tyr.db")
results1=connection.execute("SELECT TYPE_STR,printf(\"%.2f\", THICKNESS),printf(\"%.2f\", PEAK_LOAD),printf(\"%.2f\", E_PAEK_LOAD),printf(\"%.2f\", TEAR_STRENGTH) FROM REPORT_II_AGGR WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR)")
#results=connection.execute("SELECT ((A.REC_ID)-B.MIN_REC_ID)+1 AS SPECIMEN_NO,A.THICKNESS,A.WIDTH,A.CS_AREA,A.PEAK_LOAD,A.E_PAEK_LOAD,A.PERCENTG_E_PEAK_LOAD_MM,A.PERCENTG_E_PEAK_LOAD FROM REPORT_MST_II A, (SELECT MIN(REC_ID) AS MIN_REC_ID, REPORT_ID FROM REPORT_MST_II WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR) ) B WHERE A.REPORT_ID=B.REPORT_ID ")
for row_number, row_data in enumerate(results1):
self.tableWidget.insertRow(row_number)
for column_number, data in enumerate(row_data):
self.tableWidget.setItem(row_number,column_number,QTableWidgetItem(str(data)))
connection.close()
#self.tableWidget.resizeColumnsToContents()
#self.tableWidget.resizeRowsToContents()
connection = sqlite3.connect("tyr.db")
results=connection.execute("SELECT ((A.REC_ID)-B.MIN_REC_ID)+1 AS SPECIMEN_NO,printf(\"%.2f\", A.THICKNESS),printf(\"%.2f\", A.PEAK_LOAD),printf(\"%.2f\", A.E_PAEK_LOAD),printf(\"%.2f\", A.TEAR_STRENGTH) FROM REPORT_MST_II A, (SELECT MIN(REC_ID) AS MIN_REC_ID, REPORT_ID FROM REPORT_MST_II WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR) ) B WHERE A.REPORT_ID=B.REPORT_ID")
for row_number, row_data in enumerate(results):
self.tableWidget.insertRow(row_number)
for column_number, data in enumerate(row_data):
self.tableWidget.setItem(row_number,column_number,QTableWidgetItem(str(data)))
connection.close()
self.tableWidget.setEditTriggers(QtWidgets.QTableWidget.NoEditTriggers)
def select_all_rows_flexural(self):
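        # Populate the flexural-test table. GUAGE_MM from REPORT_MST is used as the
        # specimen length and converted (*0.1 for Kg/Cm, *0.0393701 for Lb/Inch)
        # to match the configured unit system.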
self.length=0
self.delete_all_records()
self.tableWidget.setMidLineWidth(-4)
self.tableWidget.setGridStyle(QtCore.Qt.SolidLine)
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(16)
font = QtGui.QFont()
font.setPointSize(10)
self.tableWidget.setFont(font)
self.tableWidget.horizontalHeader().setStyleSheet("QHeaderView { font-size: 10pt};")
self.tableWidget.verticalHeader().setStyleSheet("QHeaderView { font-size: 10pt};")
self.tableWidget.setEditTriggers(QtWidgets.QTableWidget.NoEditTriggers)
self.tableWidget.horizontalHeader().setStretchLastSection(True)
self.tableWidget.setColumnWidth(0, 100)
self.tableWidget.setColumnWidth(1, 120)
self.tableWidget.setColumnWidth(2, 120)
self.tableWidget.setColumnWidth(3, 120)
self.tableWidget.setColumnWidth(4, 180)
self.tableWidget.setColumnWidth(5, 180)
self.tableWidget.setColumnWidth(6, 150)
self.tableWidget.setColumnWidth(7, 180)
self.tableWidget.setColumnWidth(8, 150)
self.tableWidget.setColumnWidth(9, 150)
self.tableWidget.setColumnWidth(10, 180)
self.tableWidget.setColumnWidth(11, 150)
self.tableWidget.setColumnWidth(12, 180)
self.tableWidget.setColumnWidth(13, 150)
self.tableWidget.setColumnWidth(14, 150)
self.tableWidget.setColumnWidth(15, 150)
connection = sqlite3.connect("tyr.db")
results=connection.execute("SELECT STG_GRAPH_TYPE,STG_UNIT_TYPE FROM GLOBAL_REPORTS_PARAM")
for x in results:
self.unit_typex=x[1]
connection.close()
connection = sqlite3.connect("tyr.db")
results=connection.execute("SELECT IFNULL(GUAGE_MM,0) FROM REPORT_MST WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR)")
for x in results:
self.length=str(x[0])
connection.close()
self.tableWidget.horizontalHeader().setStretchLastSection(True)
if(self.unit_typex == "Kg/Cm"):
self.length=float(int(self.length)*0.1)
            self.tableWidget.setHorizontalHeaderLabels(['Spec. \n No','Length \n (cm)','Thickness \n (cm)','Width \n (cm)','Support \n Span \n (cm)', 'Max. \n Displ. \n (cm)', 'Force \n at Peak \n (Kgf)', 'Flexural \n Strength \n (Kgf/cm2) ','Flexural \n Modulus \n ','Flexural \n Strain \n % (Break)','Flexural \n Strain \n % (Input)',' Support Radius (cm) ',' Load Radius (cm) ','Speed (mm/min)','Failure \n Mode','Test \n Method'])
elif(self.unit_typex == "Lb/Inch"):
self.length=float(int(self.length)*0.0393701)
            self.tableWidget.setHorizontalHeaderLabels(['Spec. \n No','Length \n (Inch)','Thickness \n (Inch)','Width \n (Inch)','Support \n Span \n (Inch)', 'Max. \n Displ. \n (Inch)', 'Force \n at Peak\n (Lb)', 'Flexural \n Strength \n (Lb/Inch2) ','Flexural \n Modulus \n ','Flexural \n Strain \n % (Break)','Flexural \n Strain \n % (Input)',' Support Radius (Inch) ',' Load Radius (Inch) ','Speed (mm/min)','Failure \n Mode','Test \n Method'])
elif(self.unit_typex == "Newton/Mm"):
            self.tableWidget.setHorizontalHeaderLabels(['Spec. \n No','Length \n (mm)','Thickness \n (mm)','Width \n (mm)','Support \n Span \n (mm)', 'Max. \n Displ. \n (mm)', 'Force \n at Peak\n (N)', 'Flexural \n Strength \n (N/mm2)','Flexural \n Modulus \n ','Flexural \n Strain \n % (Break)','Flexural \n Strain \n % (Input)',' Support Radius (mm) ',' Load Radius (mm) ','Speed (mm/min)','Failure \n Mode','Test \n Method'])
elif(self.unit_typex == "MPA"):
            self.tableWidget.setHorizontalHeaderLabels(['Spec. \n No','Length \n (mm)','Thickness \n (mm)','Width \n (mm)','Support \n Span \n (mm)', 'Max. \n Displ. \n (mm)', 'Force \n at Peak\n (N)', 'Flexural \n Strength \n (MPa)','Flexural \n Modulus \n ','Flexural \n Strain \n % (Break)','Flexural \n Strain \n % (Input)',' Support Radius (mm) ',' Load Radius (mm) ','Speed (mm/min)','Failure \n Mode','Test \n Method'])
else:
            self.tableWidget.setHorizontalHeaderLabels(['Spec. \n No','Length \n (mm)', 'Thickness \n (mm)','Width \n (mm)','Support \n Span \n (mm)','Max. \n Displ. \n (mm)', 'Force \n at Peak\n (Kgf)', 'Flexural\n Strength \n (MPa)','Flexural \n Modulus \n ','Flexural \n Strain \n % (Break)','Flexural \n Strain \n % (Input)',' Support Radius (mm) ',' Load Radius (mm) ','Speed (mm/min)','Failure \n Mode','Test \n Method'])
connection = sqlite3.connect("tyr.db")
results1=connection.execute("SELECT TYPE_STR,990,printf(\"%.2f\", THICKNESS),printf(\"%.2f\", WIDTH),printf(\"%.2f\", SPAN),printf(\"%.2f\", E_PAEK_LOAD),printf(\"%.2f\", PEAK_LOAD),printf(\"%.2f\", FLEXURAL_STRENGTH),printf(\"%.2f\", flexural_mod_kg_cm),printf(\"%.2f\", per_strain_at_break),printf(\"%.2f\", per_strain_at_input),printf(\"%.2f\", support_radious),printf(\"%.2f\", load_radious),printf(\"%.2f\", speed_rpm) FROM REPORT_II_AGGR WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR)")
#results=connection.execute("SELECT ((A.REC_ID)-B.MIN_REC_ID)+1 AS SPECIMEN_NO,A.THICKNESS,A.WIDTH,A.CS_AREA,A.PEAK_LOAD,A.E_PAEK_LOAD,A.PERCENTG_E_PEAK_LOAD_MM,A.PERCENTG_E_PEAK_LOAD FROM REPORT_MST_II A, (SELECT MIN(REC_ID) AS MIN_REC_ID, REPORT_ID FROM REPORT_MST_II WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR) ) B WHERE A.REPORT_ID=B.REPORT_ID ")
for row_number, row_data in enumerate(results1):
self.tableWidget.insertRow(row_number)
for column_number, data in enumerate(row_data):
self.tableWidget.setItem(row_number,column_number,QTableWidgetItem(str(data)))
connection.close()
#self.tableWidget.resizeColumnsToContents()
#self.tableWidget.resizeRowsToContents()
connection = sqlite3.connect("tyr.db")
results=connection.execute("SELECT ((A.REC_ID)-B.MIN_REC_ID)+1 AS SPECIMEN_NO,"+str(self.length)+",printf(\"%.2f\", A.THICKNESS),printf(\"%.2f\", A.WIDTH),printf(\"%.2f\", A.SPAN),printf(\"%.2f\", A.E_PAEK_LOAD),printf(\"%.2f\", A.PEAK_LOAD),printf(\"%.2f\", A.FLEXURAL_STRENGTH),printf(\"%.2f\", A.FLEXURAL_MOD_KG_CM),printf(\"%.2f\", A.PER_STRAIN_AT_BREAK),printf(\"%.2f\", A.PER_STRAIN_AT_INPUT),printf(\"%.2f\", A.SUPPORT_RADIOUS),printf(\"%.2f\", A.LOAD_RADIOUS),printf(\"%.2f\", A.SPEED_RPM),A.BREAK_MODE,A.TEST_METHOD FROM REPORT_MST_II A, (SELECT MIN(REC_ID) AS MIN_REC_ID, REPORT_ID FROM REPORT_MST_II WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR) ) B WHERE A.REPORT_ID=B.REPORT_ID")
for row_number, row_data in enumerate(results):
self.tableWidget.insertRow(row_number)
for column_number, data in enumerate(row_data):
self.tableWidget.setItem(row_number,column_number,QTableWidgetItem(str(data)))
connection.close()
self.tableWidget.setEditTriggers(QtWidgets.QTableWidget.NoEditTriggers)
def select_all_rows_qlss(self):
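        # Populate the shear (QLSS) table. MOD_AT_ANY from REPORT_MST supplies the
        # shear-stress level for the "Shear Modulus @ ..." column; it defaults to
        # 100 when the stored value is empty.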
self.delete_all_records()
self.tableWidget.setMidLineWidth(-4)
self.tableWidget.setGridStyle(QtCore.Qt.SolidLine)
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(11)
font = QtGui.QFont()
font.setPointSize(10)
self.tableWidget.setFont(font)
self.tableWidget.horizontalHeader().setStyleSheet("QHeaderView { font-size: 10pt};")
self.tableWidget.verticalHeader().setStyleSheet("QHeaderView { font-size: 10pt};")
self.tableWidget.setEditTriggers(QtWidgets.QTableWidget.NoEditTriggers)
self.tableWidget.horizontalHeader().setStretchLastSection(True)
self.tableWidget.setColumnWidth(0, 80)
self.tableWidget.setColumnWidth(1, 80)
self.tableWidget.setColumnWidth(2, 80)
self.tableWidget.setColumnWidth(3, 80)
self.tableWidget.setColumnWidth(4, 80)
self.tableWidget.setColumnWidth(5, 80)
self.tableWidget.setColumnWidth(6, 100)
self.tableWidget.setColumnWidth(7, 100)
self.tableWidget.setColumnWidth(8, 250)
self.tableWidget.setColumnWidth(9, 150)
connection = sqlite3.connect("tyr.db")
results=connection.execute("SELECT STG_GRAPH_TYPE,STG_UNIT_TYPE FROM GLOBAL_REPORTS_PARAM")
for x in results:
self.unit_typex=x[1]
connection.close()
connection = sqlite3.connect("tyr.db")
results=connection.execute("SELECT MOD_AT_ANY FROM REPORT_MST WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR)")
for x in results:
self.shear_mod_ip=str(x[0])
connection.close()
if(self.shear_mod_ip == ""):
self.shear_mod_ip=100
else:
pass
self.tableWidget.horizontalHeader().setStretchLastSection(True)
if(self.unit_typex == "Kg/Cm"):
self.tableWidget.setHorizontalHeaderLabels(['Spec. \n No','Width \n (Cm)','Thickness \n (Cm)','CS Area \n (Cm2)','Max. Force \n (Kgf)',' Max. \n Disp.(Cm) ','Ult. Shear\n Strength \n (Kgf/Cm2)','Ult. Shear \n Strain %','Shear Strain \n @ Ult. Shear Stress','Shear Modulus \n @ Ult. Shear Stress \n (Kg/Cm2)','Shear Modulus \n @ '+str(self.shear_mod_ip)+'\n (Kg/Cm2) Shear Stress'])
elif(self.unit_typex == "Lb/Inch"):
            self.tableWidget.setHorizontalHeaderLabels(['Spec. \n No','Width \n (Inch)','Thickness \n (Inch)','CS Area \n (Inch2)','Max. Force \n (Lb)',' Max. \n Disp.(Inch) ','Ult. Shear\n Strength \n (Lb/Inch2)','Ult. Shear \n Strain %','Shear Strain \n @ Ult. Shear Stress','Shear Modulus \n @ Ult. Shear Stress \n (Lb/Inch2)','Shear Modulus \n @ '+str(self.shear_mod_ip)+'\n (Lb/Inch2) Shear Stress'])
elif(self.unit_typex == "Newton/Mm"):
self.tableWidget.setHorizontalHeaderLabels(['Spec. \n No','Width \n (Mm)','Thickness \n (Mm)','CS Area \n (Mm2)','Max. Force \n (N)',' Max. \n Disp.(Mm) ','Ult. Shear\n Strength \n (N/Mm2)','Ult. Shear \n Strain %','Shear Strain \n @ Ult. Shear Stress','Shear Modulus \n @ Ult. Shear Stress \n (N/Mm2)','Shear Modulus \n @ '+str(self.shear_mod_ip)+' \n (N/Mm2) Shear Stress'])
elif(self.unit_typex == "MPA"):
self.tableWidget.setHorizontalHeaderLabels(['Spec. \n No','Width \n (Mm)','Thickness \n (Mm)','CS Area \n (Mm2)','Max. Force \n (N)',' Max. \n Disp.(Mm) ','Ult. Shear\n Strength \n (MPA)','Ult. Shear \n Strain %','Shear Strain \n @ Ult. Shear Stress','Shear Modulus \n @ Ult. Shear Stress','Shear Modulus \n @ '+str(self.shear_mod_ip)+' Shear Stress'])
else:
self.tableWidget.setHorizontalHeaderLabels(['Spec. \n No','Width \n (Mm)','Thickness \n (Mm)','CS Area \n (Mm2)','Max. Force \n (Kgf)',' Max. \n Disp.(Mm) ','Ult. Shear\n Strength','Ult. Shear \n Strain %','Shear Strain \n @ Ult. Shear Stress','Shear Modulus \n @ Ult. Shear Stress','Shear Modulus \n @ '+str(self.shear_mod_ip)+' Shear Stress'])
#self.tableWidget.setHorizontalHeaderLabels.append('xsdsdsd')
connection = sqlite3.connect("tyr.db")
results1=connection.execute("SELECT TYPE_STR,printf(\"%.2f\", WIDTH),printf(\"%.2f\", THICKNESS),printf(\"%.2f\", CS_AREA),printf(\"%.2f\", PEAK_LOAD),printf(\"%.2f\", E_BREAK_LOAD),printf(\"%.2f\", ULT_SHEAR_STRENGTH_KG_CM),printf(\"%.2f\", ULT_SHEAR_STRAIN_KG_CM),printf(\"%.2f\", SHEAR_STRAIN_COLUMN_VALUE_KG_CM),printf(\"%.2f\", SHEAR_MOD_COLUMN_VALUE_KG_CM),printf(\"%.2f\",(("+str(self.shear_mod_ip)+")/(SHEAR_STRAIN_COLUMN_VALUE_KG_CM))) FROM REPORT_II_AGGR WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR)")
#results=connection.execute("SELECT ((A.REC_ID)-B.MIN_REC_ID)+1 AS SPECIMEN_NO,A.THICKNESS,A.WIDTH,A.CS_AREA,A.PEAK_LOAD,A.E_PAEK_LOAD,A.PERCENTG_E_PEAK_LOAD_MM,A.PERCENTG_E_PEAK_LOAD FROM REPORT_MST_II A, (SELECT MIN(REC_ID) AS MIN_REC_ID, REPORT_ID FROM REPORT_MST_II WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR) ) B WHERE A.REPORT_ID=B.REPORT_ID ")
for row_number, row_data in enumerate(results1):
self.tableWidget.insertRow(row_number)
for column_number, data in enumerate(row_data):
self.tableWidget.setItem(row_number,column_number,QTableWidgetItem(str(data)))
connection.close()
#self.tableWidget.resizeColumnsToContents()
#self.tableWidget.resizeRowsToContents()
connection = sqlite3.connect("tyr.db")
results=connection.execute("SELECT ((A.REC_ID)-B.MIN_REC_ID)+1 AS SPECIMEN_NO,printf(\"%.2f\", A.WIDTH),printf(\"%.2f\", A.THICKNESS),printf(\"%.2f\", A.CS_AREA),printf(\"%.2f\", A.PEAK_LOAD),printf(\"%.2f\", A.E_BREAK_LOAD),printf(\"%.2f\", A.ULT_SHEAR_STRENGTH_KG_CM),printf(\"%.2f\", A.ULT_SHEAR_STRAIN_KG_CM),printf(\"%.2f\", A.SHEAR_STRAIN_COLUMN_VALUE_KG_CM)||'@ '||printf(\"%.2f\", A.SHEAR_MOD_COLUMN_NAME_KG_CM),printf(\"%.2f\", A.SHEAR_MOD_COLUMN_VALUE_KG_CM)||'@ '||printf(\"%.2f\", A.SHEAR_MOD_COLUMN_NAME_KG_CM),printf(\"%.2f\",(("+str(self.shear_mod_ip)+")/(A.SHEAR_STRAIN_COLUMN_VALUE_KG_CM))) FROM REPORT_MST_II A, (SELECT MIN(REC_ID) AS MIN_REC_ID, REPORT_ID FROM REPORT_MST_II WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR) ) B WHERE A.REPORT_ID=B.REPORT_ID")
for row_number, row_data in enumerate(results):
self.tableWidget.insertRow(row_number)
for column_number, data in enumerate(row_data):
self.tableWidget.setItem(row_number,column_number,QTableWidgetItem(str(data)))
connection.close()
self.tableWidget.setEditTriggers(QtWidgets.QTableWidget.NoEditTriggers)
def select_all_rows_ilss(self):
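        # Populate the interlaminar shear strength (ILSS) table; reads MOD_AT_ANY and
        # GUAGE_MM from REPORT_MST and follows the same aggregate-then-per-specimen
        # query pattern used by the methods above.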
self.length=0
self.delete_all_records()
self.tableWidget.setMidLineWidth(-4)
self.tableWidget.setGridStyle(QtCore.Qt.SolidLine)
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(10)
font = QtGui.QFont()
font.setPointSize(9)
self.tableWidget.setFont(font)
self.tableWidget.horizontalHeader().setStyleSheet("QHeaderView { font-size: 10pt};")
self.tableWidget.verticalHeader().setStyleSheet("QHeaderView { font-size: 10pt};")
self.tableWidget.setEditTriggers(QtWidgets.QTableWidget.NoEditTriggers)
self.tableWidget.horizontalHeader().setStretchLastSection(True)
self.tableWidget.setColumnWidth(0, 80)
self.tableWidget.setColumnWidth(1, 80)
self.tableWidget.setColumnWidth(2, 80)
self.tableWidget.setColumnWidth(3, 80)
self.tableWidget.setColumnWidth(4, 180)
self.tableWidget.setColumnWidth(5, 100)
self.tableWidget.setColumnWidth(6, 100)
self.tableWidget.setColumnWidth(7, 100)
self.tableWidget.setColumnWidth(8, 200)
self.tableWidget.setColumnWidth(9, 300)
connection = sqlite3.connect("tyr.db")
results=connection.execute("SELECT STG_GRAPH_TYPE,STG_UNIT_TYPE FROM GLOBAL_REPORTS_PARAM")
for x in results:
self.unit_typex=x[1]
connection.close()
connection = sqlite3.connect("tyr.db")
results=connection.execute("SELECT MOD_AT_ANY,IFNULL(GUAGE_MM,0) FROM REPORT_MST WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR)")
for x in results:
self.shear_mod_ip=str(x[0])
self.length=str(x[1])
connection.close()
if(self.shear_mod_ip == ""):
self.shear_mod_ip=100
else:
pass
self.tableWidget.horizontalHeader().setStretchLastSection(True)
if(self.unit_typex == "Kg/Cm"):
self.length=float(int(self.length)*0.1)
self.tableWidget.setHorizontalHeaderLabels(['Spec. \n No','Length \n (Cm)','Width \n (Cm)','Thickness \n (Cm)','Max. Force \n (Kgf)',' Max. \n Disp.(Cm) ',' Shear\n Strength \n (Kgf/Cm2)','Support \n SPAN (Cm)',' Failure \n Mode','Test \n Method'])
elif(self.unit_typex == "Lb/Inch"):
self.length=float(int(self.length)*0.0393701)
            self.tableWidget.setHorizontalHeaderLabels(['Spec. \n No','Length \n (Inch)','Width \n (Inch)','Thickness \n (Inch)','Max. Force \n (Lb)',' Max. \n Disp.(Inch) ',' Shear\n Strength \n (Lb/Inch2)','Support \n SPAN (Inch)',' Failure \n Mode','Test \n Method'])
elif(self.unit_typex == "Newton/Mm"):
self.tableWidget.setHorizontalHeaderLabels(['Spec. \n No','Length \n (Mm)','Width \n (Mm)','Thickness \n (Mm)','Max. Force \n (N)',' Max. \n Disp.(Mm) ',' Shear\n Strength \n (N/Mm2)','Support \n SPAN (Mm)',' Failure \n Mode','Test \n Method'])
elif(self.unit_typex == "MPA"):
self.tableWidget.setHorizontalHeaderLabels(['Spec. \n No','Length \n (Mm)','Width \n (Mm)','Thickness \n (Mm)','Max. Force \n (N)',' Max. \n Disp.(Mm) ',' Shear\n Strength \n (MPA)','Support \n SPAN (Mm)',' Failure \n Mode','Test \n Method'])
else:
self.tableWidget.setHorizontalHeaderLabels(['Spec. \n No','Length \n (Mm)','Width \n (Mm)','Thickness \n (Mm)','Max. Force \n (Kgf)',' Max. \n Disp.(Mm) ',' Shear\n Strength','Support \n SPAN (Mm)',' Failure \n Mode','Test \n Method'])
#self.tableWidget.setHorizontalHeaderLabels.append('xsdsdsd')
connection = sqlite3.connect("tyr.db")
results1=connection.execute("SELECT TYPE_STR,"+str(self.length)+",printf(\"%.2f\", WIDTH),printf(\"%.2f\", THICKNESS),printf(\"%.2f\", PEAK_LOAD),printf(\"%.2f\", E_BREAK_LOAD),printf(\"%.2f\", ULT_SHEAR_STRENGTH_KG_CM),printf(\"%.2f\", SPAN),BREAK_MODE,NULL FROM REPORT_II_AGGR WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR)")
#results=connection.execute("SELECT ((A.REC_ID)-B.MIN_REC_ID)+1 AS SPECIMEN_NO,A.THICKNESS,A.WIDTH,A.CS_AREA,A.PEAK_LOAD,A.E_PAEK_LOAD,A.PERCENTG_E_PEAK_LOAD_MM,A.PERCENTG_E_PEAK_LOAD FROM REPORT_MST_II A, (SELECT MIN(REC_ID) AS MIN_REC_ID, REPORT_ID FROM REPORT_MST_II WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR) ) B WHERE A.REPORT_ID=B.REPORT_ID ")
for row_number, row_data in enumerate(results1):
self.tableWidget.insertRow(row_number)
for column_number, data in enumerate(row_data):
self.tableWidget.setItem(row_number,column_number,QTableWidgetItem(str(data)))
connection.close()
#self.tableWidget.resizeColumnsToContents()
#self.tableWidget.resizeRowsToContents()
connection = sqlite3.connect("tyr.db")
results=connection.execute("SELECT ((A.REC_ID)-B.MIN_REC_ID)+1 AS SPECIMEN_NO,"+str(self.length)+",printf(\"%.2f\", A.WIDTH),printf(\"%.2f\", A.THICKNESS),printf(\"%.2f\", A.PEAK_LOAD),printf(\"%.2f\", A.E_BREAK_LOAD),printf(\"%.2f\", A.ULT_SHEAR_STRENGTH_KG_CM),printf(\"%.2f\", A.SPAN),A.BREAK_MODE,A.TEST_METHOD FROM REPORT_MST_II A, (SELECT MIN(REC_ID) AS MIN_REC_ID, REPORT_ID FROM REPORT_MST_II WHERE REPORT_ID IN (SELECT NEW_REPORT_ID FROM GLOBAL_VAR) ) B WHERE A.REPORT_ID=B.REPORT_ID")
for row_number, row_data in enumerate(results):
self.tableWidget.insertRow(row_number)
for column_number, data in enumerate(row_data):
self.tableWidget.setItem(row_number,column_number,QTableWidgetItem(str(data)))
connection.close()
self.tableWidget.setEditTriggers(QtWidgets.QTableWidget.NoEditTriggers)
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = TY_06_Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
[
"[email protected]"
] | |
d1c996c98e38caf3f89a4b1b7f101c7d1770330d
|
5bd3122d230471b048429f5e9c49a0b39c8a54fc
|
/Atcoder_contests/ABC/165A.py
|
98d42dc081acdb8f08d06c61550ccddab9dc004a
|
[] |
no_license
|
nao1412/Competitive_Programing_Codes
|
e230e2fa85027e41c5ee062083801bb299effe9b
|
98c29b5ba75e75502cf27fcf365a7aedcd6c273c
|
refs/heads/main
| 2023-06-05T18:45:59.733301 | 2021-06-23T15:02:25 | 2021-06-23T15:02:25 | 374,061,897 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 234 |
py
|
# ABC165 A: decide whether some multiple of k lies in the interval [a, b].
k = int(input())
a, b = map(int, input().split())
if b - a + 1 >= k:
    # Any k consecutive integers must contain a multiple of k.
    ans = 'OK'
else:
    ans = 'NG'
    for i in range(a, b + 1):
        if i % k == 0:
            ans = 'OK'
            break
print(ans)
|
[
"[email protected]"
] | |
3d3bdc439906914e4d1a544305fefc7b801e63fd
|
6d80c2e28c39b1861d909c7616ce8455f2b28744
|
/character_stream.py
|
415517a5c4ef622e0d85ae20ea615fa6c5f81221
|
[] |
no_license
|
o11c/20131022-parser
|
ef1778c8469fe397524c0bb9e72fa284cde4af25
|
7da2dfa3a92dcdf32bfa11bb67c6569d3e1e1767
|
refs/heads/master
| 2020-02-26T13:24:06.954176 | 2013-10-22T22:34:40 | 2013-10-22T22:34:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 304 |
py
|
class EOFError(Exception):
    # NOTE: this shadows the built-in EOFError; callers of this module should
    # catch this class, not the builtin.
    pass


class CharacterStream(object):
    """One-character lookahead over a file-like stream."""

    def __init__(self, stream):
        self.stream = stream
        self.adv()  # prime the lookahead with the first character

    def get(self):
        # Return the current character without consuming it.
        return self.ch

    def adv(self):
        # Consume one character; read(1) returns '' at end of input.
        self.ch = self.stream.read(1)
        if self.ch == '':
            raise EOFError()
|
[
"[email protected]"
] |