Dataset schema (column: dtype, observed range/statistics):

- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (length 3 to 616)
- content_id: string (length 40)
- detected_licenses: sequence (length 0 to 112)
- license_type: string (2 classes)
- repo_name: string (length 5 to 115)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (777 classes)
- visit_date: timestamp[us] (2015-08-06 10:31:46 to 2023-09-06 10:44:38)
- revision_date: timestamp[us] (1970-01-01 02:38:32 to 2037-05-03 13:00:00)
- committer_date: timestamp[us] (1970-01-01 02:38:32 to 2023-09-06 01:08:06)
- github_id: int64 (4.92k to 681M, nullable)
- star_events_count: int64 (0 to 209k)
- fork_events_count: int64 (0 to 110k)
- gha_license_id: string (22 classes)
- gha_event_created_at: timestamp[us] (2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable)
- gha_created_at: timestamp[us] (2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable)
- gha_language: string (149 classes)
- src_encoding: string (26 classes)
- language: string (1 class)
- is_vendor: bool (2 classes)
- is_generated: bool (2 classes)
- length_bytes: int64 (3 to 10.2M)
- extension: string (188 classes)
- content: string (length 3 to 10.2M)
- authors: sequence (length 1)
- author_id: string (length 1 to 132)

blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c3a7e9c3a0692d2974f6acff056f57ab42a7bf10 | 524591f2c4f760bc01c12fea3061833847a4ff9a | /arm/usr/lib/python2.7/dist-packages/rosdistro/source_file.py | d614c68bd8ca24a162d2b0dcb90e2b2449f5b3d9 | [
"BSD-3-Clause",
"Python-2.0"
] | permissive | Roboy/roboy_plexus | 6f78d45c52055d97159fd4d0ca8e0f32f1fbd07e | 1f3039edd24c059459563cb81d194326fe824905 | refs/heads/roboy3 | 2023-03-10T15:01:34.703853 | 2021-08-16T13:42:54 | 2021-08-16T13:42:54 | 101,666,005 | 2 | 4 | BSD-3-Clause | 2022-10-22T13:43:45 | 2017-08-28T16:53:52 | C++ | UTF-8 | Python | false | false | 3,365 | py | # Software License Agreement (BSD License)
#
# Copyright (c) 2013, Open Source Robotics Foundation, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Open Source Robotics Foundation, Inc. nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from .repository_specification import RepositorySpecification
class SourceFile(object):
    _type = 'distribution'

    def __init__(self, name, data):
        self.name = name
        assert 'type' in data and data['type'] != 'source', "Unable to handle 'source' format anymore, please update your 'source' file to the latest specification"
        assert 'type' in data, "Expected file type is '%s'" % SourceFile._type
        assert data['type'] == SourceFile._type, "Expected file type is '%s', not '%s'" % (SourceFile._type, data['type'])
        assert 'version' in data, "Source file for '%s' lacks required version information" % self.name
        assert int(data['version']) == 1, "Unable to handle '%s' format version '%d', please update rosdistro (e.g. on Ubuntu/Debian use: sudo apt-get update && sudo apt-get install --only-upgrade python-rosdistro)" % (SourceFile._type, int(data['version']))
        self.version = int(data['version'])

        self.repositories = {}
        if 'repositories' in data:
            for repo_name in sorted(data['repositories']):
                repo_data = data['repositories'][repo_name]
                if 'source' not in repo_data:
                    continue
                repo_data = repo_data['source']
                try:
                    assert 'version' in repo_data, "Repository '%s' lacks required version information" % repo_name
                    repo = RepositorySpecification(repo_name, repo_data)
                except AssertionError as e:
                    e.args = [("Source file '%s': %s" % (self.name, a) if i == 0 else a) for i, a in enumerate(e.args)]
                    raise e
                self.repositories[repo_name] = repo
| [
"[email protected]"
] | |
8cd578f9a8dc8366a1bea5ec6d51e91c6c3858fc | 1609fe579811afe6f36ddca9d7c838ba5697131a | /radio/management/commands/set_default_access_tg.py | d05b1976d5381eb276ac621005d8e58c1a02900a | [
"MIT"
] | permissive | ScanOC/trunk-player | 1777347f47744538a33109a05b72d5f28d2674ef | 95f37e5a55a8f2a8b2ff6e1bb0a2b1049bc97ac4 | refs/heads/master | 2023-08-03T14:52:08.834013 | 2023-05-01T18:05:33 | 2023-05-01T18:05:33 | 68,432,108 | 65 | 50 | MIT | 2023-08-02T01:34:28 | 2016-09-17T04:35:59 | Python | UTF-8 | Python | false | false | 1,484 | py | import sys
import datetime
import csv
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.utils import timezone
from radio.models import *
class Command(BaseCommand):
    help = 'Helper for new TalkGroup Access'

    def add_arguments(self, parser):
        parser.add_argument('access_group_name')

    def handle(self, *args, **options):
        access_menu(self, options)


def access_menu(self, options):
    try:
        access_gp = TalkGroupAccess.objects.get(name=options['access_group_name'])
    except TalkGroupAccess.DoesNotExist:
        self.stdout.write(self.style.ERROR('Talk Group Access List [{}] does not exist, check case and spelling'.format(options['access_group_name'])))
        all_access_names = TalkGroupAccess.objects.all()
        if all_access_names:
            self.stdout.write('Current Talk Group Access lists in the database:')
            for tg in all_access_names:
                self.stdout.write(tg.name)
        else:
            self.stdout.write(self.style.ERROR('**There are no Talk Group Access lists in the database'))
        return
    self.stdout.write('Setting all current public Talk Groups into {}'.format(access_gp.name))
    ct = 0
    for tg in TalkGroupWithSystem.objects.filter(public=True):
        access_gp.talkgroups.add(tg)
        ct += 1
    self.stdout.write(self.style.SUCCESS('Added {} TalkGroups to Talk Group Access List - {}'.format(ct, access_gp.name)))
| [
"[email protected]"
] | |
b1f2cc85a77d533e1c78ae8a0ad3cb6b46551d10 | 88ae8695987ada722184307301e221e1ba3cc2fa | /third_party/angle/third_party/glmark2/src/waflib/Tools/md5_tstamp.py | 29522311b19f3c194ccdb4ae91512b38a2798964 | [
"GPL-3.0-only",
"LicenseRef-scancode-x11-opengl",
"Apache-2.0",
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | iridium-browser/iridium-browser | 71d9c5ff76e014e6900b825f67389ab0ccd01329 | 5ee297f53dc7f8e70183031cff62f37b0f19d25f | refs/heads/master | 2023-08-03T16:44:16.844552 | 2023-07-20T15:17:00 | 2023-07-23T16:09:30 | 220,016,632 | 341 | 40 | BSD-3-Clause | 2021-08-13T13:54:45 | 2019-11-06T14:32:31 | null | UTF-8 | Python | false | false | 732 | py | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import os,stat
from waflib import Utils,Build,Node
STRONGEST=True
Build.SAVED_ATTRS.append('hashes_md5_tstamp')
def h_file(self):
    filename=self.abspath()
    st=os.stat(filename)
    cache=self.ctx.hashes_md5_tstamp
    if filename in cache and cache[filename][0]==st.st_mtime:
        return cache[filename][1]
    global STRONGEST
    if STRONGEST:
        ret=Utils.h_file(filename)
    else:
        if stat.S_ISDIR(st[stat.ST_MODE]):
            raise IOError('Not a file')
        ret=Utils.md5(str((st.st_mtime,st.st_size)).encode()).digest()
    cache[filename]=(st.st_mtime,ret)
    return ret
h_file.__doc__=Node.Node.h_file.__doc__
Node.Node.h_file=h_file
| [
"[email protected]"
] | |
30de42b2f9e57942415e8b6c7f980872b821c582 | de590d5af29b1f962853a0c4395aa95a8aa06b58 | /0x1F-pascal_triangle/0-pascal_triangle.py | 2359f75ddfbbe4082bf683c1413fe97087f9b162 | [] | no_license | MCavigli/holbertonschool-interview | ff31425e41a49bddd6d96a1bd41835830246f132 | ab7c1e9b92eb113e8b28db82912e327736e4813f | refs/heads/master | 2020-12-23T06:32:16.256851 | 2020-11-12T20:13:48 | 2020-11-12T20:13:48 | 237,068,237 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | #!/usr/bin/python3
"""
0-pascal_triangle
"""
def pascal_triangle(n):
    """
    Returns a list of integers representing Pascal's triangle
    """
    lst = [[] for i in range(0, n)]
    for i in range(0, n):
        for j in range(i + 1):
            if (j < i):
                if (j <= 0):
                    lst[i].append(1)
                else:
                    lst[i].append(lst[i - 1][j] + lst[i - 1][j - 1])
            elif (i == j):
                lst[i].append(1)
    return (lst)
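# Illustrative usage (an assumed example, not part of the original file):
# pascal_triangle(3) returns [[1], [1, 1], [1, 2, 1]]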
| [
"[email protected]"
] | |
7114d514f06a2b225e3125b6227ae109bc916818 | f98fc6d067d1b82b184deeed530ffec38f3d0e9b | /waliki/views.py | 5a03797bd254bc69070d75eedb5591865b9890ca | [
"BSD-3-Clause"
] | permissive | leliel12/waliki | 93de82e6326018578f70112446388ea66f4f3ddc | 610d7ffb652e5eaa73824f4d69c85701ca059609 | refs/heads/master | 2021-01-17T14:07:59.503425 | 2014-09-21T23:38:54 | 2014-09-21T23:38:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,925 | py | import json
from django.http import HttpResponse
from django.shortcuts import render, redirect
from .models import Page
from .forms import PageForm
from .signals import page_saved
from ._markups import get_all_markups
from . import settings
def home(request):
    return detail(request, slug=settings.WALIKI_INDEX_SLUG)


def detail(request, slug):
    slug = slug.strip('/')
    try:
        page = Page.objects.get(slug=slug)
    except Page.DoesNotExist:
        page = None
    return render(request, 'waliki/detail.html', {'page': page, 'slug': slug})


def edit(request, slug):
    slug = slug.strip('/')
    page, _ = Page.objects.get_or_create(slug=slug)
    data = request.POST if request.method == 'POST' else None
    form = PageForm(data, instance=page)
    if form.is_valid():
        form.save()
        page_saved.send(sender=edit,
                        page=page,
                        author=request.user,
                        message=form.cleaned_data["message"])
        return redirect('waliki_detail', slug=page.slug)
    cm_modes = [(m.name, m.codemirror_mode_name) for m in get_all_markups()]
    cm_settings = settings.WALIKI_CODEMIRROR_SETTINGS
    cm_settings.update({'mode': dict(cm_modes)[page.markup]})
    return render(request, 'waliki/edit.html', {'page': page,
                                                'form': form,
                                                'slug': slug,
                                                'cm_modes': cm_modes,
                                                'cm_settings': json.dumps(cm_settings)})


def preview(request):
    data = {}
    if request.is_ajax() and request.method == "POST":
        data['html'] = Page.preview(request.POST['markup'], request.POST['text'])
    return HttpResponse(json.dumps(data), content_type="application/json")


def delete(request, slug):
    return render(request, 'waliki/detail.html', {})
| [
"[email protected]"
] | |
6e3fecbf74e22a8df6f5db0f7c7c17cca2e1d3aa | 96316aead0ad883d93eebc3fcd2dcbb30ad6636a | /authproj/authapp/views.py | bee4b8390958421253069d227518f90b41c00afa | [] | no_license | Rashika233/django_auth_new | fe2e5da31e4d1e309946c333fa0ed2d7ec369a44 | c3fbf2a7b042e33ff92ee894a63976ad97e77f7c | refs/heads/master | 2022-12-04T10:06:10.294965 | 2020-08-27T15:54:52 | 2020-08-27T15:54:52 | 290,818,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | from django.shortcuts import render
from django.views.generic import TemplateView
# Create your views here.
class HomePageView(TemplateView):
    template_name='home.html' | [
"[email protected]"
] | |
c7a14f2f19153b5ed04edaec1cd126007973aac9 | 46e29adaf067dea495da67b09bf1be60d26c45c9 | /genetic_algorithm/chromosome.py | 9de551cf850875c5f52ca27381599032ce386d95 | [
"MIT"
] | permissive | GeorgianBadita/Genetic-Programming-Function-Approximation | 89800f96a7c457a6b7d5d4bb2deae915a7e7f3bc | 5436074941d8888eb545e2cc4b332c6c832342f3 | refs/heads/master | 2020-09-12T20:58:09.383435 | 2020-06-13T10:22:27 | 2020-06-13T10:22:27 | 222,554,527 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,825 | py | import random
import math
import numpy as np
import warnings
warnings.filterwarnings("error")
class Chromosome:
"""
Class for representing a chromosome
"""
def __init__(self, terminal_set, funct_set, depth, method='full'):
"""
Constructor for Chromosome class
@param: depth - tree depth
@param: method - method to generate the tree, default is full
@param: terminal_set - set of terminals
@param: funct_set - set of functions
"""
self.depth = depth
self.gen = []
self.terminal_set = terminal_set
self.func_set = funct_set
self.fitness = None
if method == 'grow':
self.grow()
elif method == 'full':
self.full()
def full(self, level = 0):
"""
Function to generate a tree in a full manner
Every node will have exactly two children
return: None
"""
if level == self.depth:
self.gen.append(random.choice(self.terminal_set))
else:
val = random.choice(self.func_set[1] + self.func_set[2])
if val in self.func_set[2]:
self.gen.append(random.choice(self.func_set[2]))
self.full(level + 1)
self.full(level + 1)
else:
self.gen.append(random.choice(self.func_set[1]))
self.full(level + 1)
def grow(self, level = 0):
"""
Function to generate a tree in a grow manner
Every node may be a terminal or a function
@return: None
"""
if level == self.depth:
self.gen.append(random.choice(self.terminal_set))
else:
if random.random() > 0.3:
val = random.choice(self.func_set[2] + self.func_set[1])
if val in self.func_set[2]:
self.gen.append(val)
self.grow(level + 1)
self.grow(level + 1)
else:
self.gen.append(val)
self.grow(level + 1)
else:
val = random.choice(self.terminal_set)
self.gen.append(val)
def eval(self, input, poz = 0):
"""
Function to evaluate the current chromosome with a given input
@param: input - function input (x0, x1... xn)
@poz: current_position in genotype
@return:
"""
if self.gen[poz] in self.terminal_set:
return input[int(self.gen[poz][1:])], poz
elif self.gen[poz] in self.func_set[2]:
poz_op = poz
left, poz = self.eval(input, poz + 1)
right, poz = self.eval(input, poz + 1)
if self.gen[poz_op] == '+':
return left + right, poz
elif self.gen[poz_op] == '-':
return left - right, poz
elif self.gen[poz_op] == '*':
return left * right, poz
elif self.gen[poz_op] == '^':
return left ** right, poz
elif self.gen[poz_op] == '/':
return left / right, poz
else:
poz_op = poz
left, poz = self.eval(input, poz + 1)
if self.gen[poz_op] == 'sin':
return np.sin(left), poz
elif self.gen[poz_op] == 'cos':
return np.cos(left), poz
elif self.gen[poz_op] == 'ln':
return np.log(left), poz
elif self.gen[poz_op] == 'sqrt':
return np.sqrt(left), poz
elif self.gen[poz_op] == 'tg':
return np.tan(left), poz
elif self.gen[poz_op] == 'ctg':
return 1/np.tan(left), poz
elif self.gen[poz_op] == 'e':
return np.exp(left), poz
elif self.gen[poz_op] == 'tanh':
return np.tanh(left), poz
elif self.gen[poz_op] == 'abs':
return abs(left), poz
def evaluate_arg(self, input):
"""
Function to evaluate the current genotype to a given input
@return: the value of self.gen evaluated at the given input
"""
return self.eval(input)[0]
def calculate_fitness(self, inputs, outputs):
"""
Function to calculate the fitness of a chromosome
@param inputs: inputs of the function we want to predict
@param outputs: outputs of the function we want to predict
@return: the chromosome's fitness (calculated based on MSE)
"""
diff = 0
for i in range(len(inputs)):
try:
diff += (self.eval(inputs[i])[0] - outputs[i][0])**2
except RuntimeWarning:
self.gen = []
if random.random() > 0.5:
self.grow()
else:
self.full()
self.calculate_fitness(inputs, outputs)
if len(inputs) == 0:
return 1e9
self.fitness = diff/(len(inputs))
return self.fitness
def __get_depth_aux(self, poz = 0):
"""
Function to get the depth of a chromosome
@return: chromosome's depth, last pos
"""
elem = self.gen[poz]
if elem in self.func_set[2]:
left, poz = self.__get_depth_aux(poz + 1)
right, poz = self.__get_depth_aux(poz)
return 1 + max(left, right), poz
elif elem in self.func_set[1]:
left, poz = self.__get_depth_aux(poz + 1)
return left + 1, poz
else:
return 1, poz + 1
def get_depth(self):
"""
Function to get the depth of a chromosome
@return: - chromosome's depth
"""
return self.__get_depth_aux()[0] - 1 | [
"[email protected]"
] | |
917e84a924d158a00d84a9381f093fbcf770d0e0 | 0b08158331fc9dfaead2ce5d67665facc36ff7f5 | /openstack_dashboard/dashboards/admin/routers/tables.py | e914f08ad26039c3eb0136534521d644768d30d4 | [] | no_license | linyihan2013/horizon | 7086a5aad773c5eb45762b5ad8465e7e8c52e0fc | 42adcfdcaeaf3366b6b8664d7de485fb8c3c901e | refs/heads/master | 2021-01-11T19:13:35.118072 | 2017-02-03T07:45:40 | 2017-02-03T07:45:40 | 79,337,770 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,003 | py | # Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.routers import tables as r_tables
class DeleteRouter(r_tables.DeleteRouter):
    redirect_url = "horizon:admin:routers:index"


class EditRouter(r_tables.EditRouter):
    url = "horizon:admin:routers:update"


class UpdateRow(tables.Row):
    ajax = True

    def get_data(self, request, router_id):
        router = api.neutron.router_get(request, router_id)
        return router


class AdminRoutersFilterAction(r_tables.RoutersFilterAction):
    name = 'filter_admin_routers'
    filter_choices = r_tables.RoutersFilterAction.filter_choices + (
        ('project', _("Project ="), True),)


class RoutersTable(r_tables.RoutersTable):
    tenant = tables.Column("tenant_name", verbose_name=_("Project"))
    name = tables.WrappingColumn("name",
                                 verbose_name=_("Name"),
                                 link="horizon:admin:routers:detail")

    class Meta(object):
        name = "routers"
        verbose_name = _("Routers")
        status_columns = ["status"]
        row_class = UpdateRow
        table_actions = (DeleteRouter, AdminRoutersFilterAction)
        row_actions = (EditRouter, DeleteRouter,)
        Columns = ('tenant', 'name', 'status', 'distributed', 'ext_net')
| [
"[email protected]"
] | |
50f356f3cc16e1baf68dac9dd9953090eafc3811 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/3/ix1.py | 9ef14eb7866e68fb5e0f192e9f0a85df0b935088 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # data to print
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            print

def main(fileName):
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'ix1':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return

if __name__ == '__main__':
    main(sys.argv[1]) | [
"[email protected]"
] | |
94dc893c82918cbe23b3f108f0902528e8950ded | 22749c6a569661b2637233cc0aebdc1701033b26 | /src/python/pants/backend/python/lint/flake8/subsystem_test.py | b1819103c29e14c0aef21b06727ffe544a568063 | [
"Apache-2.0"
] | permissive | akk5597/pants | 2eceb226c39b8ef7f603dfa96684b7522e1a9065 | 7ad295f71d2990eebbbe9c778bbf70f7d9e66584 | refs/heads/main | 2023-08-27T02:40:54.753545 | 2021-11-10T03:42:18 | 2021-11-10T03:42:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,397 | py | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
from pants.backend.python import target_types_rules
from pants.backend.python.goals.lockfile import PythonLockfileRequest
from pants.backend.python.lint.flake8 import skip_field
from pants.backend.python.lint.flake8.subsystem import Flake8LockfileSentinel
from pants.backend.python.lint.flake8.subsystem import rules as subsystem_rules
from pants.backend.python.target_types import PythonSourcesGeneratorTarget
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.core.target_types import GenericTarget
from pants.testutil.rule_runner import QueryRule, RuleRunner
def test_setup_lockfile_interpreter_constraints() -> None:
rule_runner = RuleRunner(
rules=[
*subsystem_rules(),
*skip_field.rules(),
*target_types_rules.rules(),
QueryRule(PythonLockfileRequest, [Flake8LockfileSentinel]),
],
target_types=[PythonSourcesGeneratorTarget, GenericTarget],
)
global_constraint = "==3.9.*"
rule_runner.set_options(
["--flake8-lockfile=lockfile.txt"],
env={"PANTS_PYTHON_INTERPRETER_CONSTRAINTS": f"['{global_constraint}']"},
)
def assert_ics(build_file: str, expected: list[str]) -> None:
rule_runner.write_files({"project/BUILD": build_file, "project/f.py": ""})
lockfile_request = rule_runner.request(PythonLockfileRequest, [Flake8LockfileSentinel()])
assert lockfile_request.interpreter_constraints == InterpreterConstraints(expected)
assert_ics("python_sources()", [global_constraint])
assert_ics("python_sources(interpreter_constraints=['==2.7.*'])", ["==2.7.*"])
assert_ics(
"python_sources(interpreter_constraints=['==2.7.*', '==3.5.*'])", ["==2.7.*", "==3.5.*"]
)
# If no Python targets in repo, fall back to global [python] constraints.
assert_ics("target()", [global_constraint])
# Ignore targets that are skipped.
assert_ics(
dedent(
"""\
python_sources(name='a', interpreter_constraints=['==2.7.*'])
python_sources(name='b', interpreter_constraints=['==3.5.*'], skip_flake8=True)
"""
),
["==2.7.*"],
)
# If there are multiple distinct ICs in the repo, we OR them. This is because Flake8 will
# group into each distinct IC.
assert_ics(
dedent(
"""\
python_sources(name='a', interpreter_constraints=['==2.7.*'])
python_sources(name='b', interpreter_constraints=['==3.5.*'])
"""
),
["==2.7.*", "==3.5.*"],
)
assert_ics(
dedent(
"""\
python_sources(name='a', interpreter_constraints=['==2.7.*', '==3.5.*'])
python_sources(name='b', interpreter_constraints=['>=3.5'])
"""
),
["==2.7.*", "==3.5.*", ">=3.5"],
)
assert_ics(
dedent(
"""\
python_sources(name='a')
python_sources(name='b', interpreter_constraints=['==2.7.*'])
python_sources(name='c', interpreter_constraints=['>=3.6'])
"""
),
["==2.7.*", global_constraint, ">=3.6"],
)
| [
"[email protected]"
] | |
b094561bb43bc796f0b5a3d8cab158ada80d2a5c | 7bcb08ff9aa4c7aee78fe11a51375ad69e5c651d | /TestMethod.py | d58300564fc885c2f6f9582216346878d8782568 | [] | no_license | lc527756006/MachineLearningCW3 | 923441a548891c104800d78c18c476733296c2aa | 1c21d4579f79f1154769996c34283cb3f921f304 | refs/heads/master | 2021-01-17T04:25:35.786315 | 2017-02-22T23:14:33 | 2017-02-22T23:14:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 833 | py | # encoding=utf-8
import numpy as ny
import scipy.io as sio
a=[[1,2,3],[4,5,6]]
b=ny.array(a)
c=[0 for i in range(6)]
def load_data(i):
    # load the data
    clean_data = sio.loadmat("DecisionTreeData/cleandata_students.mat")
    tdata = clean_data['x']
    ldata = clean_data['y']
    print len(tdata)
    print len(ldata)
    # process the labels (one-hot encode them)
    label_result=[]
    tdata_result=[]
    for ind,label_data in enumerate(ldata):
        if ind % 10 ==i:
            real_label=label_data[0]
            temp_label=[0 for i in range(6)]
            temp_label[real_label-1]=1
            label_result.append(temp_label)
            tdata_result.append(tdata[ind])
    ny_tdata=ny.array(tdata_result)
    ny_label=ny.array(label_result)
    return ny_tdata,ny_label

d,e=load_data(1)
# print b
# print c
print len(d)
print len(e) | [
"[email protected]"
] | |
37a6ab00fbd2140699d440e7c55e4f0516154c08 | 0c70dcec22a090e70b1f20613ea6e0a64fd9a037 | /GPS卫星位置的计算/venv/Lib/site-packages/pandas/tests/frame/methods/test_value_counts.py | d6ae0421b79083a459cc483107b921a45be42b0c | [
"MIT"
] | permissive | payiz-asj/Gis | 82c1096d830878f62c7a0d5dfb6630d4e4744764 | 3d315fed93e2ab850b836ddfd7a67f5618969d10 | refs/heads/main | 2023-06-27T15:25:17.301154 | 2021-08-03T10:02:58 | 2021-08-03T10:02:58 | 392,269,853 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,733 | py | import numpy as np
import pandas as pd
import pandas._testing as tm
def test_data_frame_value_counts_unsorted():
df = pd.DataFrame(
{"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},
index=["falcon", "dog", "cat", "ant"],
)
result = df.value_counts(sort=False)
expected = pd.Series(
data=[1, 2, 1],
index=pd.MultiIndex.from_arrays(
[(2, 4, 6), (2, 0, 0)], names=["num_legs", "num_wings"]
),
)
tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_ascending():
df = pd.DataFrame(
{"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},
index=["falcon", "dog", "cat", "ant"],
)
result = df.value_counts(ascending=True)
expected = pd.Series(
data=[1, 1, 2],
index=pd.MultiIndex.from_arrays(
[(2, 6, 4), (2, 0, 0)], names=["num_legs", "num_wings"]
),
)
tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_default():
df = pd.DataFrame(
{"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},
index=["falcon", "dog", "cat", "ant"],
)
result = df.value_counts()
expected = pd.Series(
data=[2, 1, 1],
index=pd.MultiIndex.from_arrays(
[(4, 6, 2), (0, 0, 2)], names=["num_legs", "num_wings"]
),
)
tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_normalize():
df = pd.DataFrame(
{"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},
index=["falcon", "dog", "cat", "ant"],
)
result = df.value_counts(normalize=True)
expected = pd.Series(
data=[0.5, 0.25, 0.25],
index=pd.MultiIndex.from_arrays(
[(4, 6, 2), (0, 0, 2)], names=["num_legs", "num_wings"]
),
)
tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_single_col_default():
df = pd.DataFrame({"num_legs": [2, 4, 4, 6]})
result = df.value_counts()
expected = pd.Series(
data=[2, 1, 1],
index=pd.MultiIndex.from_arrays([[4, 6, 2]], names=["num_legs"]),
)
tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_empty():
df_no_cols = pd.DataFrame()
result = df_no_cols.value_counts()
expected = pd.Series([], dtype=np.int64)
tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_empty_normalize():
df_no_cols = pd.DataFrame()
result = df_no_cols.value_counts(normalize=True)
expected = pd.Series([], dtype=np.float64)
tm.assert_series_equal(result, expected)
| [
"[email protected]"
] | |
bc4ef7ea57a0c417ba89d83543e3f2bde704ec36 | cbda89443b351bb2047180dad4e300c13dc3df7f | /Crystals/Morpurgo_sp_outer/Jobs/Pc/Pc_neut_neut_inner0_outer4/Pc_neut_neut_inner0_outer4.py | 1753bcc8360bd27ab685b3d3e60a4b4dc2778655 | [] | no_license | sheridanfew/pythonpolarisation | 080f52979f98d26360a46412a10c8e3f51ee4549 | 178e2684e9a239a8e60af5f7b1eb414ac5f31e92 | refs/heads/master | 2021-07-10T01:07:40.978790 | 2021-03-11T16:56:37 | 2021-03-11T16:56:37 | 96,101,351 | 0 | 0 | null | 2017-07-03T13:37:06 | 2017-07-03T10:54:52 | null | UTF-8 | Python | false | false | 5,279 | py | import sys
sys.path.append('../../../../../')
from BasicElements import *
from BasicElements.Register import GetRegister
from BasicElements.MoleculeFactory import ReadMoleculeType
from BasicElements.MoleculeFactory import GetMolecule
from BasicElements.Crystal import *
from Polarizability.GetDipoles import get_dipoles,split_dipoles_onto_atoms
from Polarizability import *
from Polarizability.GetEnergyFromDips import *
from Polarizability.JMatrix import JMatrix
import numpy as np
from math import *
from time import gmtime, strftime
import os
print strftime("%a, %d %b %Y %X +0000", gmtime())
name='Pc_neut_neut_inner0_outer4'
#For crystals here, all cubic and centred at centre
insize=0
#number of TVs in each dir central mol is from edge of inner region
outsize=4
mols_cen=['Pc_mola_neut_aniso_cifstruct_chelpg.xyz','Pc_molb_neut_aniso_cifstruct_chelpg.xyz']
mols_sur=['Pc_mola_neut_aniso_cifstruct_chelpg.xyz','Pc_molb_neut_aniso_cifstruct_chelpg.xyz']
mols_outer=['sp_Pc_mola_neut.xyz','sp_Pc_molb_neut.xyz']
#From cif:
'''
Pc
_cell_length_a 7.900
_cell_length_b 6.060
_cell_length_c 16.010
_cell_angle_alpha 101.90
_cell_angle_beta 112.60
_cell_angle_gamma 85.80
_cell_volume 692.384
'''
#Get translation vectors:
a=7.900/0.5291772109217
b=6.060/0.5291772109217
c=16.010/0.5291772109217
alpha=101.90*(pi/180)
beta=112.60*(pi/180)
gamma=90*(pi/180)
cif_unit_cell_volume=692.384/(a*b*c*(0.5291772109217**3))
cell_volume=sqrt(1 - (cos(alpha)**2) - (cos(beta)**2) - (cos(gamma)**2) + (2*cos(alpha)*cos(beta)*cos(gamma)))
#Converts frac coords to carts
matrix_to_cartesian=np.matrix( [[a, b*cos(gamma), c*cos(beta)],
[0, b*sin(gamma), c*(cos(alpha) - cos(beta)*cos(gamma))/sin(gamma)],
[0, 0, c*cell_volume/sin(gamma)]])
#carts to frac
matrix_to_fractional=matrix_to_cartesian.I
#TVs, TV[0,1,2] are the three translation vectors.
TV=matrix_to_cartesian.T
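# Illustrative (assumed) usage of the conversion matrices defined above, not part of
# the original script: a fractional coordinate column vector such as
# frac = np.matrix([[0.5], [0.5], [0.5]]) maps to cartesian coordinates (in bohr) via
# matrix_to_cartesian * frac, and back to fractional via matrix_to_fractional.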
cut=8.0
totsize=insize+outsize
#number of TVs in each dir nearest c inner mol is from edge of outer region
cenpos=[totsize,totsize,totsize]
length=[2*totsize+1,2*totsize+1,2*totsize+1]
maxTVs=insize
outer_maxTVs=insize+outsize
#for diamond outer, don't specify for cube and will fill to cube edges.
print 'name: ',name,'mols_cen: ', mols_cen,' mols_sur: ',mols_sur,' TVs: ', TV
# Place Molecules
prot_neut_cry=Crystal(name=name,mols_cen=mols_cen,mols_sur=mols_sur,cenpos=cenpos,length=length,TVs=TV,maxTVs=maxTVs,mols_outer=mols_outer,outer_maxTVs=outer_maxTVs)
#prot_neut_cry._mols contains all molecules.
#mols[0] contains a list of all molecules in position a, mols[1] all mols in pos'n b, etc.
#mols[0][x,y,z] contains molecule a in position x,y,z
#mols may as such be iterated over in a number of ways to consider different molecules.
prot_neut_cry().print_posns()
#Calculate Properties:
print strftime("%a, %d %b %Y %X +0000", gmtime())
E0 = np.matrix([0.,0.,0.])
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Calc jm'
jm = JMatrix(cutoff=cut)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Calc dips:'
d = get_dipoles(E0=E0,jm=jm._m,cutoff=cut)
print strftime("%a, %d %b %Y %X +0000", gmtime())
Efield = get_electric_field(E0)
potential = get_potential()
print strftime("%a, %d %b %Y %X +0000", gmtime())
#print 'dips', d
print 'splitting dips onto atoms'
split_d = split_dipoles_onto_atoms(d)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'summing dips:'
tot = np.matrix([0.,0.,0.])
for dd in split_d:
    tot += dd
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'total dip moment', tot
Uqq = np.multiply(get_U_qq(potential=potential),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Uqq', Uqq
Uqd = np.multiply(get_U_qdip(dips=d,Efield=Efield),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Uqd', Uqd
Udd = np.multiply(get_U_dipdip(jm=jm._m,dips=d.T),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Udd', Udd
energyev = Udd+Uqd+Uqq
print 'energyev', energyev
energy=energyev/27.211
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Making .dat cross sections for gnuplot'
# print TVs
if not os.path.exists('Dips_Posns_TVs'): os.makedirs('Dips_Posns_TVs')
f = open('Dips_Posns_TVs/%s_TVs.dat' % name, 'w')
TVstr=str(str(TV[0,0]) + ' ' + str(TV[0,1]) + ' ' + str(TV[0,2]) + '\n' + str(TV[1,0]) + ' ' + str(TV[1,1]) + ' ' + str(TV[1,2]) + '\n' + str(TV[2,0]) + ' ' + str(TV[2,1]) + ' ' + str(TV[2,2])+ '\n')
f.write(TVstr)
f.flush()
f.close()
# print dipoles
if not os.path.exists('Dips_Posns_TVs'): os.makedirs('Dips_Posns_TVs')
f = open('Dips_Posns_TVs/%s_dipoles.dat' % name, 'w')
for dd in split_d:
    dstr=str(dd)
    f.write(dstr)
    f.write('\n')
f.flush()
f.close()
# print properties for charge in centrepos
time=strftime("%a, %d %b %Y %X +0000", gmtime())
f = open('%s_properties.csv' % name, 'w')
f.write ('time\tname\tmols_cen\tmols_sur\tmols_outer\tinsize\toutsize\tenergyev\tUqq\tUqd\tUdd\tTotdip_x\tTotdip_y\tTotdip_z')
f.write ('\n%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % (time,name,mols_cen,mols_sur,mols_outer,insize,outsize,energyev,Uqq,Uqd,Udd,tot[0,0],tot[0,1],tot[0,2]))
f.flush()
f.close()
print 'Job Completed Successfully.'
| [
"[email protected]"
] | |
a1c657b1669648b68aab122a276b9a7e415bd902 | 99701affb7ae46c42c55484f3301d59f79294a10 | /project/Examples/Examples/PP2E/Gui/ShellGui/Old/menugui0.py | afa1fa88bd67a89c2bde4e46238fb0d828516893 | [] | no_license | inteljack/EL6183-Digital-Signal-Processing-Lab-2015-Fall | 1050b9e9bddb335bf42b7debf2abebe51dd9f9e0 | 0f650a97d8fbaa576142e5bb1745f136b027bc73 | refs/heads/master | 2021-01-21T21:48:21.326372 | 2016-04-06T20:05:19 | 2016-04-06T20:05:19 | 42,902,523 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,182 | py | #!/usr/local/bin/python
from shellgui import * # type-specific shell interfaces
class TextPak1(ListMenuGui):
    def __init__(self):
        self.myMenu = [('Pack', self.Pack),
                       ('Unpack', self.Unpack),
                       ('Mtool', self.Missing)]
        ListMenuGui.__init__(self)
    def forToolBar(self, label):
        return label in ['Pack', 'Unpack']
    def Pack(self): print 'pack dialog...'
    def Unpack(self): print 'unpack dialog...'
    def Missing(self): print 'not yet implemented...'

class TextPak2(DictMenuGui):
    def __init__(self):
        self.myMenu = {'Pack': self.Pack,
                       'Unpack': self.Unpack,
                       'Mtool': self.Missing}
        DictMenuGui.__init__(self)
    def Pack(self): print 'pack dialog...'
    def Unpack(self): print 'unpack dialog...'
    def Missing(self): print 'not yet implemented...'

if __name__ == '__main__': # self-test code...
    from sys import argv
    if len(argv) > 1 and argv[1] == 'list':
        print 'list test'
        TextPak1().mainloop()
    else:
        print 'dict test'
        TextPak2().mainloop()
| [
"[email protected]"
] | |
e71b6313b4cdfca169fb2903c4a7e17fc6731107 | 1a9ec1308a2ade079e95782906e5f8af4ecb403e | /MySortAlgorithm/5_quicksort.py | 89d85a25eed4a5731450f592a3eea1051cc15120 | [] | no_license | HeywoodKing/mytest | f0174f40bb60a7557ac361f566be36ac1b642366 | ac19822dd28e3db60c56b57ba3dd50cb52736c6b | refs/heads/master | 2022-12-12T15:47:28.141969 | 2020-07-24T00:46:01 | 2020-07-24T00:46:01 | 228,036,752 | 0 | 0 | null | 2022-12-08T06:18:26 | 2019-12-14T14:30:54 | Python | UTF-8 | Python | false | false | 2,159 | py | # -*- coding: utf-8 -*-
import time
"""
快速排序 (默认从小到大排序) 两边夹击
对于一个无序序列,取出第一个元素作为标志元素,剩下的元素定义两个头,一个低头,一个高头两个指针,从低指针开始和
标志元素比较,如果小于标志元素,继续查找,如果大于标志元素,停止低指针继续,从高指针一端开始查找,如果大于标志元素,
继续查找,如果小于标志元素,则将该元素和低指针的大于标志元素的交换位置,然后继续从低指针一端继续查找,知道两个指针相遇
,此位置插入标志元素,第一次遍历结束,在重复上述动作,以此类推
"""
# if alist[high] < mid_value:
# alist[low] = alist[high]
# low += 1
# elif :
# high -= 1
#
# if alist[low] < mid_value:
# low += 1
# elif alist[low] > mid_value:
# alist[high] = alist[low]
# high -= 1
# 最优时间复杂度O(nlog以2为底的n) 最坏时间复杂度O(n平方) 空间复杂度增加了 稳定性:不稳定
def quick_sort(alist, first=0, last=0):
# n = len(alist)
if last <= first:
return alist
mid_value = alist[first]
low = first
high = last
# if high == 0:
# high = n - 1
while low < high:
# 高指针方向开始移动(向左移动)
while low < high and alist[high] >= mid_value:
high -= 1
alist[low] = alist[high]
# low += 1
# 低指针方向开始移动(向右移动)
while low < high and alist[low] < mid_value:
low += 1
alist[high] = alist[low]
# high -= 1
alist[low] = mid_value
# llist = quick_sort(alist[:low - 1]) # 左侧序列
# rlist = quick_sort(alist[low + 1:]) # 右侧序列
quick_sort(alist, first, low - 1)
quick_sort(alist, low + 1, last)
return alist
if __name__ == "__main__":
ls = [33, 100, 4, 56, 39, 78, 12, 0, 20, 16]
start_time = time.time()
res = quick_sort(ls, 0, len(ls) - 1)
end_time = time.time()
print("耗时:%s" % (end_time - start_time))
print(res)
| [
"[email protected]"
] | |
1fe5c11c421daf63a4d38aeba349c6f8b5f23f40 | 88cf3aa4eb13cda1790cd930ed2cb8c08964c955 | /chainercv/experimental/links/__init__.py | 25635fc7c593028b9f2ccc131d04205b2c365173 | [
"MIT"
] | permissive | mingxiaoh/chainercv | 6ae854f445b7a14f55e41b51b5e4226224702b95 | cfb7481907efe93e13c729ae2d9df4706d3975a6 | refs/heads/master | 2022-11-11T20:44:31.875419 | 2018-05-03T04:09:13 | 2018-05-03T04:33:15 | 131,939,645 | 1 | 2 | MIT | 2022-10-26T00:07:37 | 2018-05-03T03:58:59 | Python | UTF-8 | Python | false | false | 89 | py | from chainercv.experimental.links.model.fcis.fcis_resnet101 import FCISResNet101 # NOQA
| [
"[email protected]"
] | |
47defa7aab533fca08104fe6ee4ab64b38b50662 | c947309ef03dc6ddd0a8bc0186adbab0089782d5 | /torch/distributed/algorithms/ddp_comm_hooks/powerSGD_hook.py | e0c377132876bbbb528ea07ff24e89333dfce5a5 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | KsenijaS/pytorch | dc1dc2fd7adb982110aa0730d8625a84975c2e94 | 3bdb7af1e79c8e514a8cd4c87333ee7b77a78c36 | refs/heads/master | 2021-06-18T00:04:39.806795 | 2021-04-07T16:52:18 | 2021-04-07T16:52:18 | 191,849,503 | 1 | 1 | NOASSERTION | 2020-04-02T16:47:24 | 2019-06-14T00:14:56 | C++ | UTF-8 | Python | false | false | 33,986 | py | import logging
import math
import numpy as np
import torch
import torch.distributed as dist
from . import default_hooks as default
def _orthogonalize(matrix, epsilon=1e-8):
"""
Applies Gram-Schmidt procedure to orthogonalize a given 2D tensor.
If epsilon is 0, this is equivalent to `torch.qr(matrix, out=(matrix, _))`,
but `torch.qr` is very slow, probably because it is not optimized for a matrix that has a small number of columns.
"""
num_cols = matrix.shape[1]
for i in range(num_cols):
# Normalize the i'th column.
col = matrix[:, i : i + 1]
# If no epsilon is added here, division by zero may be caused by vanishing gradients.
# This epsilon is not needed if the input matrix covers the gradients of at least one entire layer in the neural network.
if epsilon == 0:
# Note that col ** 2 can underflow/overflow if we use FP16.
# May need to consider multiplying a scaling factor and dividing it later, or using bfloat16 instead.
col /= torch.norm(col)
else:
col /= torch.norm(col) + epsilon
# Project it on the rest and remove it.
if i + 1 < num_cols:
rest = matrix[:, i + 1 :]
rest -= torch.sum(col * rest, dim=0) * col
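# Illustrative check (an assumed usage sketch, not part of the original module):
# after `m = torch.randn(8, 2); _orthogonalize(m)`, the columns of `m` are orthonormal,
# so `torch.allclose(m.t() @ m, torch.eye(2), atol=1e-5)` holds up to floating-point error.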
def _should_compress(
num_rows, num_cols, matrix_approximation_rank, min_compression_rate
):
"""
Returns a recommendation as to whether the 2D tensor described by the arguments is worth compressing,
including statistics describing the expected savings from compression. We consider a tensor worth
compressing when ``min_compression_rate`` < uncompressed size / compressed size, where
uncompressed size = ``num_rows`` * ``num_cols``,
and compressed size = (``num_rows`` + ``num_cols``) * ``matrix_approximation_rank``.
The result of this function is a tuple of the form (compression_recommendation, uncompressed_el_count, compressed_el_count), where:
compresion_recommendation is true if the tensor is worth compressing, and false otherwise (see above);
uncompressed_el_count is the uncompressed element count, i.e. ``num_rows`` * ``num_cols``; and,
compress_el_count is the element count after compression, i.e. (``num_rows`` + ``num_cols``) * ``matrix_approximation_rank``.
""" # noqa
uncompressed_size = num_rows * num_cols
compressed_size = (num_rows + num_cols) * matrix_approximation_rank
return (
compressed_size * min_compression_rate < uncompressed_size,
uncompressed_size,
compressed_size,
)
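# Worked example (illustrative numbers, not from the original source): for a 1024 x 1024
# gradient with matrix_approximation_rank=4 and min_compression_rate=2, the uncompressed
# size is 1024 * 1024 = 1,048,576 elements and the compressed size is
# (1024 + 1024) * 4 = 8,192 elements; since 8,192 * 2 < 1,048,576, the tensor is reported
# as worth compressing.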
def _report_compression_stats(bucket, state):
"""
Report compression stats at the frequency of `compression_stats_logging_frequency` specified in PowerSGD state.
"""
if (
bucket.is_the_last_bucket_to_allreduce()
and state.iter >= state.next_stats_report
):
stats = state.compression_stats()
logging.info(
"Compression stats: iter {}, total before compression {}, total after compression {}, "
"rate {}".format(state.iter, stats[1], stats[2], stats[0])
)
state.next_stats_report = state.iter + state.compression_stats_logging_frequency
class PowerSGDState(object):
r"""
Stores both the algorithm's hyperparameters and the internal state for all the gradients during the training.
Particularly, ``matrix_approximation_rank`` and ``start_powerSGD_iter`` are the main hyperparameters that should be tuned by the user.
For performance, we suggest to keep binary hyperparameters ``use_error_feedback`` and ``warm_start`` on.
1. ``matrix_approximation_rank`` controls the size of compressed low-rank tensors, which determines the compression rate. The lower the rank, the stronger the compression.
1.1. If ``matrix_approximation_rank`` is too low, the full model quality will need more training steps to reach or will never reach and yield loss in accuracy.
1.2. The increase of ``matrix_approximation_rank`` can substantially increase the computation costs of the compression, and the accuracy may not be further improved beyond a certain ``matrix_approximation_rank`` threshold.
To tune ``matrix_approximation_rank``, we suggest starting from 1 and increasing by factors of 2 (like an exponential grid search, 1, 2, 4, ...), until a satisfactory accuracy is reached. Typically only a small value 1-4 is used. For some NLP tasks (as shown in Appendix D of the original paper), this value has been increased to 32.
2. ``start_powerSGD_iter`` defers PowerSGD compression until step ``start_powerSGD_iter``, and vanilla allreduce runs prior to step ``start_powerSGD_iter``. This hybrid scheme of **vanilla allreduce + PowerSGD** can effectively improve the accuracy, even a relatively small ``matrix_approximation_rank`` is used. This is because that, the beginning of training phase is usually very sensitive to inaccurate gradients, and compressing gradients too early may make the training quickly take a suboptimal trajectory, which can result in an irrecoverable impact on the accuracy.
To tune ``start_powerSGD_iter``, we suggest to start with 10% of total training steps, and increase it until a satisfactory accuracy is reached.
3. ``min_compression_rate`` is the minimum compression rate required when a layer is compressed. Due to the computation overheads incurred by the compression, a tensor is worth compressing only if there can be sufficient saving in bandwidth, where `` (num_rows + num_cols) * matrix_approximation_rank * min_compression_rate < num_rows * num_cols``. If the specified compression rate threshold cannot be satisfied, the tensor will be directly allreduced without compression.
Compression statistics are logged every ``compression_stats_logging_frequency`` iterations once PowerSGD compression starts.
.. warning ::
If error feedback or warm-up is enabled, the minimum value of ``start_powerSGD_iter`` allowed in DDP is 2.
This is because there is another internal optimization that rebuilds buckets at iteration 1 in DDP,
and this can conflict with any tensor memorized before the rebuild process.
""" # noqa
__slots__ = [
"process_group",
# The three fields below are the hyperparameters that should be tuned by the user.
"matrix_approximation_rank",
"start_powerSGD_iter",
"min_compression_rate",
# The two fields below are the binary hyperparameters recommended to be turned on for performance.
"use_error_feedback",
"warm_start",
# The fields below are internal state.
"rng",
"error_dict",
"p_memory_dict",
"q_memory_dict",
"iter",
# The fields below are for recording compression stats.
"total_numel_before_compression",
"total_numel_after_compression",
"compression_stats_logging_frequency",
"next_stats_report",
]
def __init__(
self,
process_group,
matrix_approximation_rank=1,
start_powerSGD_iter=10,
min_compression_rate=2,
use_error_feedback=True,
warm_start=True,
random_seed=0,
compression_stats_logging_frequency=10_000,
):
logging.info(
"PowerSGD config: matrix_approximation_rank = {}; start_powerSGD_iter = {}; "
"min_compression_rate = {}; use_error_feedback = {}; warm_start = {}; "
"random_seed = {}; compression_stats_logging_frequency = {}".format(
matrix_approximation_rank,
start_powerSGD_iter,
min_compression_rate,
use_error_feedback,
warm_start,
random_seed,
compression_stats_logging_frequency,
)
)
self.process_group = process_group
self.matrix_approximation_rank = matrix_approximation_rank
# Deferring PowerSGD compression until step 'start_powerSGD_iter' can have two advantages:
# 1) It turns out that PowerSGD may lead to a non-trivial accuracy loss,
# even if the matrix approximation rank is increased to a large value.
# To mitigate the accuracy loss, a simple yet effective way is mixing vanilla allreduce
# (or a more conservative compression such as FP16 compression) with PowerSGD.
# 2) There is an internal optimization of rebuilding buckets process in DDP,
# in order to save the memory space.
# This step takes place after the first iteration.
# However, this means that the shape of input bucketized tensors is subject to change,
# which will complicate the implementations of error feedback and warm-up.
# Running vanilla allreduce in the first few iterations can avoid this complexity.
if (use_error_feedback or warm_start) and start_powerSGD_iter <= 1:
raise ValueError(
"Expect `start_powerSGD_iter` > 1 if `use_error_feedback` or `warm_start` is enabled, "
"because PowerSGD can only be applied after the first two iterations in DDP."
)
self.start_powerSGD_iter = start_powerSGD_iter
self.min_compression_rate = min_compression_rate
# Error feedback is usually crucial for both for convergence and generalization,
# because PowerSGD is a biased compressor,
# i.e., compressing and decompressing a random gradient does not yield the original in expectation.
# This mechanism requires a temporary copy of the input gradients,
# so it increases the peak memory consumption by the size of the gradient tensor.
# However, if the target matrices are known to be exactly low-ranked (instead of just low stable rank),
# sometimes it is possible to converge to the optima without error feedback.
# See: http://proceedings.mlr.press/v54/yurtsever17a/yurtsever17a.pdf
self.use_error_feedback = use_error_feedback
# Warm-start reuses P(s) and Q(s) from the previous iteration.
# This can improve the approximation quality and hence improve the accuracy.
# Additionally, by avoiding the initialization of these low-rank tensors at every step,
# this can also accelerate training.
# However, this is at the cost of extra memory.
self.warm_start = warm_start
# The purpose of this RNG is to generate different random seeds for initializing Q across iterations,
# but in the same order for all the DDP replicas.
# Different random seeds across iterations indicate different 'projections' of the gradients at different SGD steps.
# If the same random projection is used,
# there will be differences between the gradients that are never synchronized.
self.rng = np.random.RandomState(random_seed)
# Since there is only a single state instance for all the input buckets,
# need to maintain a dictionary that maps each bucket index to the local error.
self.error_dict = {}
self.p_memory_dict = {}
self.q_memory_dict = {}
# Iteration/step in the training loop.
self.iter = 0
# Compression stats accumulators
self.total_numel_before_compression = 0
self.total_numel_after_compression = 0
# We'll report compression stats every 'compression_stats_logging_frequency' iterations
# Note that we always report compression stats at least once.
self.compression_stats_logging_frequency = max(
1, compression_stats_logging_frequency
)
self.next_stats_report = 0
def maybe_increase_iter(self, bucket):
# Since bucket 0 is the last bucket to allreduce in an iteration.
# Only increase `iter` when bucket 0 is processed.
if bucket.is_the_last_bucket_to_allreduce():
self.iter += 1
if self.iter == self.start_powerSGD_iter:
logging.info(
"Start to apply PowerSGD after {} iterations.".format(self.iter)
)
def compression_stats(self):
r"""
Returns the latest compression statistics as a tuple of the form (compress_rate, numel_before_compression, numel_after_compression), where:
compress_rate is the effective compression rate i.e. (number of elements before compression) / (number of elements after compression);
numel_before_compression is the total number of elements before compression was applied; and,
numel_after_compression is the total number of elements after compression was applied.
""" # noqa
compress_rate = (
self.total_numel_before_compression / self.total_numel_after_compression
if self.total_numel_after_compression > 0
else 0
)
return (
compress_rate,
self.total_numel_before_compression,
self.total_numel_after_compression,
)
def powerSGD_hook(
state: PowerSGDState, bucket: dist.GradBucket
) -> torch.futures.Future:
r"""
This DDP communication hook implements PowerSGD gradient compression
algorithm described in the `paper <https://arxiv.org/abs/1905.13727>`_.
Once gradient tensors are aggregated across all workers, this hook applies
compression as follows:
1. Views the input flattened 1D gradient tensor as a list of per-parameter tensors, and divides all the tensors into two groups:
1.1 The tensors that should be compressed before allreduce, because the compression can give enough saving in bandwidth.
1.2 Rest of the tensors will be directly allreduced without compression, including all the vector tensors (for biases).
2. Handles uncompressed tensors:
2.1. Allocate contiguous memory for those uncompressed tensors, and allreduces all the uncompressed tensors as a batch, without compression;
2.2. Copies the individual uncompressed tensors from the contiguous memory back to the input tensor.
3. Handles the tensors that should be compressed by PowerSGD compression:
3.1. For each tensor M, creates two low-rank tensors P and Q for decomposing M,
such that M = PQ^T, where Q is initialized from a standard normal distribution and orthogonalized;
3.2. Computes each P in Ps, which is equal to MQ;
3.3. Allreduces Ps as a batch;
3.4. Orthogonalizes each P in Ps;
3.5. Computes each Q in Qs, which is approximately equal to M^TP;
3.6. Allreduces Qs as a batch;
3.7. Computes each M among all the compressed tensors, which is approximately equal to PQ^T.
Note that this communication hook enforces vanilla allreduce for the first ``state.start_powerSGD_iter`` iterations.
This not only gives the user more control over the tradeoff between speedup and accuracy,
but also helps abstract away some complexity of the internal optimization of DDP for future communication hook developers.
Args:
state (PowerSGDState): State information to configure the compression rate and support error feedback, warm start, etc.
To tune the compression configs, mainly need to tune ``matrix_approximation_rank``, ``start_powerSGD_iter``
and ``min_compression_rate``.
bucket (dist.GradBucket): Bucket that stores a 1D flattened gradient tensor that batches multiple per-variable tensors.
Note that since DDP comm hook only supports single process single device mode at this time,
only exactly one tensor is stored in this bucket.
Returns:
Future handler of the communication, which updates the gradients in place.
Example::
>>> state = PowerSGDState(process_group=process_group, matrix_approximation_rank=1,
start_powerSGD_iter=10, min_compression_rate=0.5)
>>> ddp_model.register_comm_hook(state, powerSGD_hook)
""" # noqa
process_group = state.process_group
group_to_use = process_group if process_group is not None else dist.group.WORLD
world_size = group_to_use.size()
# The input tensor is a flattened 1D tensor.
input_tensor = bucket.get_tensors()[0]
# Run vanilla allreduce in the first `start_powerSGD_iter` iterations.
if state.iter < state.start_powerSGD_iter:
state.maybe_increase_iter(bucket)
return default._allreduce_fut(group_to_use, input_tensor)
# Apply PowerSGD after `start_powerSGD_iter` iterations.
device = input_tensor.device
dtype = input_tensor.dtype
# Incorporate the error from the previous state into the gradients.
bucket_index = bucket.get_index()
input_tensor_cp = None
total_length = input_tensor.shape[0]
if state.use_error_feedback:
if bucket_index in state.error_dict:
input_tensor.add_(state.error_dict[bucket_index])
else:
logging.info(
"A zero tensor of length {} that represents local error is created.".format(
total_length
)
)
state.error_dict[bucket_index] = torch.zeros(
total_length, device=device, dtype=dtype
)
# Keep a copy of the input tensor,
# so that we can compute the local error caused by compression later,
# by comparing this copy and the input tensor updated after decompression.
input_tensor_cp = torch.clone(input_tensor).detach()
# Unflatten the input tensor into per-parameter tensors, for layer-wise compression.
tensors = bucket.get_per_parameter_tensors()
# Step I: Divide all the tensors into two groups,
# one will be compressed before allreduce and the other will be directly allreduced without compression.
tensors_to_compress, uncompressed_tensors = [], []
total_Ps_size = 0
total_Qs_size = 0
for tensor in tensors:
matrix = tensor.view(tensor.shape[0], -1)
n, m = matrix.shape
matrix_approximation_rank = min(n, m, state.matrix_approximation_rank)
compress_test = _should_compress(
n, m, matrix_approximation_rank, state.min_compression_rate
)
state.total_numel_before_compression += compress_test[1]
if compress_test[0]:
tensors_to_compress.append(matrix)
total_Ps_size += n * matrix_approximation_rank
total_Qs_size += m * matrix_approximation_rank
state.total_numel_after_compression += compress_test[2]
else:
uncompressed_tensors.append(tensor)
state.total_numel_after_compression += compress_test[1]
_report_compression_stats(bucket, state)
# Step II: Handle uncompressed tensors.
# Allocate contiguous memory for these tensors to allreduce efficiently.
uncompressed_tensors_memory = (
torch.cat([tensor.view(-1) for tensor in uncompressed_tensors])
if uncompressed_tensors
else torch.tensor([], device=device, dtype=dtype)
)
# Step III: Handle the tensors that should be compressed.
# Allocate contiguous memory for Ps and Qs to allreduce efficiently.
# If warm-start is enabled, reuse Ps and Qs from the previous iteration if possible.
# The memory spaces of Ps and Qs need to be allocated in the first iteration when PowerSGD is applied.
need_randomize_qs = False
if not state.warm_start or bucket_index not in state.p_memory_dict:
need_randomize_qs = True
# If warm-start is disabled, low-rank tensors will be initialized at every step.
# Only log this if warm-start to avoid spamming.
if state.warm_start:
logging.info(
"Allocating contiguous memory of length {} for Ps, and of length {} for Qs, respectively.".format(
total_Ps_size, total_Qs_size
)
)
state.p_memory_dict[bucket_index] = torch.empty(
total_Ps_size, device=device, dtype=dtype
)
state.q_memory_dict[bucket_index] = torch.empty(
total_Qs_size, device=device, dtype=dtype
)
# Create Ps and Qs that point to the allocated memory.
ps = []
qs = []
p_idx = 0
q_idx = 0
for tensor in tensors_to_compress:
n, m = tensor.shape
matrix_approximation_rank = min(n, m, state.matrix_approximation_rank)
ps.append(
state.p_memory_dict[bucket_index][
p_idx : p_idx + n * matrix_approximation_rank
].view(n, matrix_approximation_rank)
)
qs.append(
state.q_memory_dict[bucket_index][
q_idx : q_idx + m * matrix_approximation_rank
].view(m, matrix_approximation_rank)
)
p_idx += n * matrix_approximation_rank
q_idx += m * matrix_approximation_rank
# If warm-start is enabled, reuse Qs from the previous iteration if possible and skip filling random values.
# The exception is the first iteration when PowerSGD is applied.
if not need_randomize_qs:
for q in qs:
_orthogonalize(q)
else:
with torch.random.fork_rng(devices=[]):
# Fork this RNG to avoid changing the seed globally and affecting the random sampling anywhere else in the training.
# The seed makes sure that the initial random values are the same across all the DDP replicas.
# This seed should differ at every step.
# Since it is very slow to fork RNG state across all the CUDA devices,
# only fork on CPU and then move the generated tensor to the CUDA device (by overwriting q).
torch.manual_seed(state.rng.randint(1_000_000_000))
for q in qs:
q.copy_(
torch.randn(
*q.shape,
device="cpu",
dtype=dtype,
)
)
_orthogonalize(q)
# Compute Ps.
for tensor, q, p in zip(tensors_to_compress, qs, ps):
torch.matmul(tensor, q, out=p)
# This allreduce is only applied to uncompressed tensors,
# so it should have been kicked off before the above computation on the compressed tensors to hide more communication costs.
# However, this somehow requires a separate future chain at this time.
allreduce_contiguous_uncompressed_tensors_fut = dist.all_reduce(
uncompressed_tensors_memory, group=group_to_use, async_op=True
).get_future()
def unpack_uncompressed_tensors_and_allreduce_ps(fut):
uncompressed_tensors_memory = fut.value()[0].div_(world_size)
idx = 0
for tensor in uncompressed_tensors:
tensor.copy_(
uncompressed_tensors_memory[idx : idx + tensor.numel()].view_as(tensor)
)
idx += tensor.numel()
# Since these Ps will be orthogonalized later, no need to divide them by world size.
return [
dist.all_reduce(
state.p_memory_dict[bucket_index], group=group_to_use, async_op=True
)
.get_future()
.wait()[0]
]
def compute_qs(fut):
state.p_memory_dict[bucket_index] = fut.value()[0]
for p in ps:
_orthogonalize(p)
# Compute Qs.
for tensor, p, q in zip(tensors_to_compress, ps, qs):
torch.matmul(tensor.t(), p, out=q)
# TODO: The above procedure does two matmul+allreduce steps per iteration --
# one left multiplication and one right multiplication.
# For warm-start, can take one such step at a time, and alternate between them.
# Allreduce Qs.
return [
dist.all_reduce(
state.q_memory_dict[bucket_index], group=group_to_use, async_op=True
)
.get_future()
.wait()[0]
]
def decompress(fut):
state.q_memory_dict[bucket_index] = fut.value()[0].div_(world_size)
for p, q, tensor in zip(ps, qs, tensors_to_compress):
torch.matmul(p, q.t(), out=tensor)
if torch.cuda.is_available():
torch.cuda.synchronize(device)
if state.use_error_feedback:
# Memorize the local errors.
state.error_dict[bucket_index] = input_tensor_cp - input_tensor
if not state.warm_start:
state.p_memory_dict.clear()
state.q_memory_dict.clear()
state.maybe_increase_iter(bucket)
return [input_tensor]
return (
allreduce_contiguous_uncompressed_tensors_fut.then(
unpack_uncompressed_tensors_and_allreduce_ps
)
.then(compute_qs)
.then(decompress)
)
def batched_powerSGD_hook(
state: PowerSGDState, bucket: dist.GradBucket
) -> torch.futures.Future:
r"""
This DDP communication hook implements a simplified PowerSGD gradient compression
algorithm described in the `paper <https://arxiv.org/abs/1905.13727>`_.
This variant does not compress the gradients layer by layer,
but instead compresses the flattened input tensor that batches all the gradients.
Therefore, it is **faster** than :meth:`powerSGD_hook`,
but usually results in a **much lower accuracy**, unless ``matrix_approximation_rank`` is 1.
.. warning ::
Increasing ``matrix_approximation_rank`` here may not necessarily increase the accuracy,
because batching per-parameter tensors without column/row alignment can destroy low-rank structure.
Therefore, the user should always consider :meth:`powerSGD_hook` first,
and only consider this variant when a satisfactory accuracy can be achieved when ``matrix_approximation_rank`` is 1.
Once gradient tensors are aggregated across all workers, this hook applies
compression as follows:
1. Views the input flattened 1D gradient tensor as a square-shaped tensor M with 0 paddings;
2. Creates two low-rank tensors P and Q for decomposing M, such that M = PQ^T, where Q is initialized from a standard normal distribution and orthogonalized;
3. Computes P, which is equal to MQ;
4. Allreduces P;
5. Orthogonalizes P;
6. Computes Q, which is approximately equal to M^TP;
7. Allreduces Q;
8. Computes M, which is approximately equal to PQ^T.
9. Truncates the input tensor to the original length.
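    As a rough illustration of the compression, for a bucket of 1,000,000 gradient elements
    and ``matrix_approximation_rank=1``, M is viewed as a 1000 x 1000 matrix and only P and Q
    (2,000 elements in total) are allreduced, at the cost of two allreduce rounds instead of one.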
Note that this communication hook enforces vanilla allreduce for the first ``state.start_powerSGD_iter`` iterations.
This not only gives the user more control over the tradeoff between speedup and accuracy,
but also helps abstract away some complexity of the internal optimization of DDP for future communication hook developers.
Args:
state (PowerSGDState): State information to configure the compression rate and support error feedback, warm start, etc.
To tune the compression configs, mainly need to tune ``matrix_approximation_rank`` and ``start_powerSGD_iter``.
bucket (dist.GradBucket): Bucket that stores a 1D flattened gradient tensor that batches multiple per-variable tensors.
Note that since DDP comm hook only supports single process single device mode at this time,
only exactly one tensor is stored in this bucket.
Returns:
Future handler of the communication, which updates the gradients in place.
Example::
>>> state = PowerSGDState(process_group=process_group, matrix_approximation_rank=1)
>>> ddp_model.register_comm_hook(state, batched_powerSGD_hook)
""" # noqa
process_group = state.process_group
group_to_use = process_group if process_group is not None else dist.group.WORLD
world_size = group_to_use.size()
# The input tensor is a flattened 1D tensor.
input_tensor = bucket.get_tensors()[0]
# Run vanilla allreduce in the first `start_powerSGD_iter` iterations.
if state.iter < state.start_powerSGD_iter:
state.maybe_increase_iter(bucket)
return default._allreduce_fut(group_to_use, input_tensor)
# Apply PowerSGD after `start_powerSGD_iter` iterations.
device = input_tensor.device
total_length = input_tensor.shape[0]
state.total_numel_before_compression += total_length
# View the input tensor as a 2D square-shape tensor, and pad 0s if necessary.
square_side_length = math.ceil(math.sqrt(total_length))
state.total_numel_after_compression += (
square_side_length * state.matrix_approximation_rank * 2
)
padded_total_length = square_side_length ** 2
input_tensor.resize_(padded_total_length)
input_tensor[total_length:padded_total_length].fill_(0)
_report_compression_stats(bucket, state)
# Incorporate the error from the previous state into the gradients.
bucket_index = bucket.get_index()
input_tensor_cp = None
if state.use_error_feedback:
if bucket_index in state.error_dict:
input_tensor.add_(state.error_dict[bucket_index])
else:
logging.info(
"A zero tensor of length {} that represents local error is created.".format(
padded_total_length
)
)
state.error_dict[bucket_index] = torch.zeros(
padded_total_length, device=device, dtype=input_tensor.dtype
)
# Keep a copy of the input tensor,
# so that we can compute the local error caused by compression later,
# by comparing this copy and the input tensor updated after decompression.
input_tensor_cp = torch.clone(input_tensor).detach()
matrix = input_tensor.view(square_side_length, square_side_length)
# Reuse P and Q from the previous iteration if possible.
# The memory spaces of P and Q need to be allocated in the first iteration when PowerSGD is applied.
if not state.warm_start or bucket_index not in state.p_memory_dict:
# If warm-start is disabled, low-rank tensors will be initialized at every step.
# Only log this if warm-start to avoid spamming.
if state.warm_start:
logging.info(
"Initializing low-rank tensors P and Q, each of which has a shape of {} x {}.".format(
square_side_length, state.matrix_approximation_rank
)
)
def create_low_rank_tensor(fill_random_values, rng):
"Returns a low-rank 2D tensor of square_side_length * matrix_approximation_rank."
if fill_random_values:
with torch.random.fork_rng(devices=[]):
# Fork this RNG to avoid changing the seed globally and affecting the random sampling
# anywhere else in the training.
# The seed makes sure that the initial random values are the same across all the DDP replicas.
# This seed should differ at every step.
# Since it is very slow to fork RNG state across all the CUDA devices,
# only fork on CPU and then move the generated tensor to the CUDA device.
torch.manual_seed(rng.randint(1_000_000_000))
return torch.randn(
square_side_length,
state.matrix_approximation_rank,
device="cpu",
dtype=input_tensor.dtype,
).to(device)
else:
return torch.empty(
square_side_length,
state.matrix_approximation_rank,
device=device,
dtype=input_tensor.dtype,
)
state.p_memory_dict[bucket_index] = create_low_rank_tensor(
fill_random_values=False, rng=state.rng
)
state.q_memory_dict[bucket_index] = create_low_rank_tensor(
fill_random_values=True, rng=state.rng
)
_orthogonalize(state.q_memory_dict[bucket_index], 0)
torch.matmul(
matrix, state.q_memory_dict[bucket_index], out=state.p_memory_dict[bucket_index]
)
allreduce_p_fut = dist.all_reduce(
state.p_memory_dict[bucket_index], group=group_to_use, async_op=True
).get_future()
def compute_q(fut):
state.p_memory_dict[bucket_index] = fut.value()[0]
_orthogonalize(state.p_memory_dict[bucket_index], 0)
torch.matmul(
matrix.t(),
state.p_memory_dict[bucket_index],
out=state.q_memory_dict[bucket_index],
)
# TODO: The above procedure does two matmul+allreduce steps per iteration --
# one left multiplication and one right multiplication.
# For warm-start, can take one such step at a time, and alternate between them.
return [
dist.all_reduce(
state.q_memory_dict[bucket_index], group=group_to_use, async_op=True
)
.get_future()
.wait()[0]
]
def decompress(fut):
state.q_memory_dict[bucket_index] = fut.value()[0].div_(world_size)
torch.matmul(
state.p_memory_dict[bucket_index],
state.q_memory_dict[bucket_index].t(),
out=matrix,
)
if state.use_error_feedback:
# Memorize the local errors.
state.error_dict[bucket_index] = input_tensor_cp - input_tensor
        # Removing this seemingly unnecessary sync somehow may cause failures.
# See: https://github.com/pytorch/pytorch/pull/54838
if torch.cuda.is_available():
torch.cuda.synchronize(device)
if not state.warm_start:
state.p_memory_dict.clear()
state.q_memory_dict.clear()
ret = input_tensor.resize_(total_length)
state.maybe_increase_iter(bucket)
return [ret]
return allreduce_p_fut.then(compute_q).then(decompress)
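# Minimal usage sketch (illustrative): how one of the hooks above might be registered
# on a DDP model. The names `ddp_model` and `process_group` are placeholders, and the
# tuning values are arbitrary examples; it is assumed that `PowerSGDState` accepts
# `start_powerSGD_iter` as a constructor argument, as suggested by the docstrings above.
def _example_register_powerSGD(ddp_model, process_group=None):
    # Rank-2 PowerSGD; vanilla allreduce is kept for the first 1000 iterations.
    # `batched_powerSGD_hook` could be registered the same way for the faster,
    # lower-accuracy variant.
    state = PowerSGDState(
        process_group=process_group,
        matrix_approximation_rank=2,
        start_powerSGD_iter=1000,
    )
    ddp_model.register_comm_hook(state, powerSGD_hook)
    return state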
| [
"[email protected]"
] | |
39618f68fe15ead1d408d92e16a6c74de256073d | 970565892722ac73038688343eddcd89f856e72b | /code/part2/mapper_ip_to_hits.py | b83031092aee84bcba8eb5f3a36e2ef278987ca5 | [] | no_license | dqian96/udacity-intro-to-hadoop-and-mapreduce | 558d50881d6c20b8ef0c02f130d9b2b10857d547 | 0f5c58c60a47530ca8cac15b353f8a00b0efde0b | refs/heads/master | 2021-06-08T20:54:33.486437 | 2016-11-26T00:20:01 | 2016-11-26T00:20:01 | 67,740,455 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 493 | py | #!/usr/bin/python
# Maps page to number of hits.
# Map("log_chunk_2", "10.22.33...") -> [("1.1.1.1", 1),...,("1.1.1.1",2)]
import sys
import re
REGEX_COMMON_LOG_FORMAT = '(^[(\d{1,3}\.)]+\d{1,3}) (.+) (.+) \[(\d{2}/\w{3}/\d{4}):(\d{2}:\d{2}:\d{2}) -(\d{4})] "(\w+) (.+) ([\w\\/\.]+)[ ]*" (\d{3}) (-|\d+)$'
def parseLine(line):
line = line.strip('\n')
return re.match(REGEX_COMMON_LOG_FORMAT, line).groups()
for line in sys.stdin:
data = parseLine(line)
print data[0] + '\t' + "1"
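# A companion Hadoop-streaming reducer for this mapper might look like the sketch below.
# The function name and the in-memory aggregation are assumptions for illustration; it
# consumes the "<ip>\t1" records the mapper emits and sums them into hits per IP.
def reduce_hits(lines):
    counts = {}
    for entry in lines:
        ip, value = entry.strip().split('\t')
        # Accumulate one hit per mapper-emitted record for this IP.
        counts[ip] = counts.get(ip, 0) + int(value)
    return counts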
| [
"[email protected]"
] | |
0277ed8e4adb2a696e72cade9a86945c5ded6646 | f0581fa08ef790606ca019890a2233f91b1c42a7 | /PythonSrc/greeting.py | 8864f7f80db107d64aa5b5a0cedba6491ca106ab | [] | no_license | jaycoskey/IntroToPythonCourse | de758f0dd0a1b541edb2ef4dcc20950a8d8788bb | d1373ec6602584a6791fd48d37ae66ff5f104487 | refs/heads/master | 2023-02-22T16:32:50.533091 | 2021-01-27T08:22:14 | 2021-01-27T08:22:14 | 333,007,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | #!/usr/bin/env python3
name = input("What's your name? ")
print('Hello, ' + name + '!')
| [
"[email protected]"
] | |
7526d4b5dca68fde0208ee88c56ae5e6478b6ba5 | 85c426913d63773c4802a4a3c354df909030654b | /python/FA3/Integration/Group 1/Group 1/PyFood/functionality/searchfunctions.py | 30ddd14491b0ecaac3a8af817284a9985908b5ee | [] | no_license | SensehacK/playgrounds | 17bf2a3133db6c0cafe185c4cc2c7b59862980aa | 3decd550cdb6034db8b497051acaaec8221073aa | refs/heads/master | 2023-05-11T20:05:31.680168 | 2023-04-30T00:01:58 | 2023-04-30T00:01:58 | 159,632,542 | 1 | 0 | null | 2023-03-05T11:34:34 | 2018-11-29T08:27:53 | Python | UTF-8 | Python | false | false | 2,668 | py | '''
Created on Mar 16, 2017
@author: gaurav.sainger
'''
# from database.ViewDB import search_restaurants
from validations import Validate
from exceptions import CustomException2
from functionality import filtersearch
def search_as_guest():
try:
city=input("Enter your city:")
area=input("Enter your area:")
# city=city1.upper()
# area=area1.upper()
list_of_restaurants=Validate.validate_search_category(city,area)
'''
Print the details
'''
print("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ")
print("resturantname type of food likes dislikes rating")
for select in list_of_restaurants:
print(select.get_restaurantname()," ",select.get_type_of_food()," " , select.get_likes()," ",select.get_dislikes()," ",select.get_rating())
print()
choice=input("Do you want to filter or select restaurant?(F/S)")
try:
if (choice.upper()=="F"):
filtersearch.Filter_search(city,area)
except CustomException2.Invalidfilter as e:
print(e)
except Exception as e:
print("Choose F or S")
print(e)
if (choice.upper()=="S"):
resturant_name=input("Enter the resturant name:")
print("")
except CustomException2.InvalidCategoryException as e:
print(e)
except Exception as e:
print(e)
print()
def search_as_login(city,area):
try:
list_of_restaurants=Validate.validate_search_category(city,area)
print("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ")
print("resturantname type of food likes dislikes rating")
for select in list_of_restaurants:
print(select.get_restaurantname()," ",select.get_type_of_food()," " , select.get_likes()," ",select.get_dislikes()," ",select.get_rating())
print()
choice=input("Do you want to filter or select restaurant?(F/S)")
if (choice=="F"):
filtersearch.Filter_search()
if (choice=="S"):
resturant_name=input("Enter the resturant name:")
print("")
    except CustomException2.InvalidCategoryException as e:
print(e)
except Exception as e:
print("Sorry. Some system error occurred")
print(e)
print()
| [
"[email protected]"
] | |
d015a810f9644b95079e6ab7f4d4040c9b35aff6 | 6e67656c0fc79939f97a42d5ada57ab5b935b615 | /mmic_ffpa/models/__init__.py | c98ed853b48793f90506b2200e550a3441de9917 | [
"BSD-3-Clause"
] | permissive | pk-organics/mmic_ffpa | 50bd058f1952b2f1f5409e89e5d83a0f7cc06a7f | 6c61a93ed95a89cc06cf294674460f497f158530 | refs/heads/master | 2023-04-01T10:17:10.956970 | 2021-03-25T22:11:31 | 2021-03-25T22:11:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88 | py | from . import input
from . import output
from .input import *
from .output import *
| [
"[email protected]"
] | |
08b0f6e23267d53449ba1b735756c83ac36b41d3 | 8cd0dc8d216130e4042c68a6ca79a80315a3037f | /venv/bin/easy_install | 676950ea3601ddf278115f78544067ccdd65c540 | [] | no_license | mutaihillary/Nsokoo | d5556bc35d2b12b08c9247c0bbd230c89657c390 | e84d74755e60e83800e64343aa12639c9bd79804 | refs/heads/master | 2021-01-17T18:35:34.374280 | 2016-10-11T16:10:25 | 2016-10-11T16:10:25 | 69,482,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | #!/home/kipkoech/Repos/flaskwebsite/app/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
2fab840e3c8c56b96c177cfe1d3791bad4b10365 | 3307766701d680af6d12a726a2d98df2cb1830e5 | /jams/gcj/2011/1C/C.py | 6a9956971f4be9063aaf7c2938656157a3b9c2e9 | [] | no_license | dpaneda/code | c1a54037a275fa7044eb5c2d6079f052dd968615 | 7da1ede33a6a7cd19cbd0db517d91e7cccfbbfff | refs/heads/master | 2023-01-07T18:41:00.816363 | 2022-12-30T09:24:22 | 2022-12-30T09:24:22 | 1,583,913 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | #!/usr/bin/python
# This works well and it's clean, but not good enough to fight against the large version :)
import sys
def Solve():
[n, l, h] = map(int,sys.stdin.readline().split())
notes = map(int,sys.stdin.readline().split())
index = l
while index < h+1:
freq = True
for i in notes:
if i % index != 0 and index % i != 0:
freq = False
break
if freq == True:
            return index
index += 1
return "NO"
num = int(sys.stdin.readline())
for case in range(1, num + 1):
print "Case #%d: %s" % (case, Solve())
| [
"[email protected]"
] | |
ffef77aa03d7bd6a4d97dc2d6033bc634767da14 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-881.py | 96b180ba3614359c67033c631417e455b48fcf2c | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,752 | py | # A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A resizable list of integers
class Vector2(object):
items: [int] = None
items2: [int] = None
size: int = 0
size2: int = 0
def __init__(self:"Vector2"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector2") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector2") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector2", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector2", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector2", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
    def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector2", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector2", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector2", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector2") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector2") -> int:
return self.size
# A resizable list of integers
class Vector3(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
def __init__(self:"Vector3"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector3") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector3", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector3", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector3", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector3", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector3", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector3", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector3") -> int:
return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
return self.size
# A resizable list of integers
class Vector5(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
items5: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
size5: int = 0
def __init__(self:"Vector5"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity5(self:"Vector5") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity5(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector5", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector5", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector5", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
item5:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector5", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector5", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector5", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length5(self:"Vector5") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
doubling_limit5:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity5(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
v:Vector = None
v2:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve2(v:Vector, v2:Vector) -> object:
i:int = 0
i2:int = 0
j:int = 0
j2:int = 0
k:int = 0
k2:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
j:int = 0
j2:int = 0
j3:int = 0
k:int = 0
k2:int = 0
k3:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
j5:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
k5:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
| [
"[email protected]"
] | |
3aebfa43822cf14ee8d21ed70f1c53729b30e15c | 5dfe17514dc43d32c7945cc60b178fe58b15abc8 | /Projects/Halma Game/Developing/AIClassTeam.py | ba3f99fdfbe673e651e4baf2933f85c647849f85 | [
"MIT"
] | permissive | cholazzzb/A-Study-of-AI | 79fd391400addf1d3fcc12630691b0d45033f40f | b069d536eb344a363d1b042086926d026afc0360 | refs/heads/master | 2021-01-08T23:42:14.988760 | 2020-05-02T06:18:27 | 2020-05-02T06:18:27 | 242,178,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,544 | py | # -*- coding: utf-8 -*-
"""
Create : 16 April 2020 16.23
Last Update : 16 April 2020
@author: Toro
"""
from copy import deepcopy
# FORMAT (vertical or y, horizontal or x)
startPositions = {
101: (0, 0), 102: (0, 1), 103: (1, 0), 104: (0, 2), 105: (1, 1), 106: (2, 0), 107: (0, 3), 108: (1, 2), 109: (2, 1), 110: (3, 0), 111: (1, 3), 112: (2, 2), 113: (3, 1),
201: (0, 9), 202: (0, 8), 203: (1, 9), 204: (0, 7), 205: (1, 8), 206: (2, 9), 207: (0, 6), 208: (1, 7), 209: (2, 8), 210: (3, 9), 211: (1, 6), 212: (2, 7), 213: (3, 8),
301: (9, 9), 302: (9, 8), 303: (8, 9), 304: (9, 7), 305: (8, 8), 306: (7, 9), 307: (9, 6), 308: (8, 7), 309: (7, 8), 310: (6, 9), 311: (8, 6), 312: (7, 7), 313: (6, 8),
401: (9, 0), 402: (9, 1), 403: (8, 0), 404: (9, 2), 405: (8, 1), 406: (7, 0), 407: (9, 3), 408: (8, 2), 409: (7, 1), 410: (6, 0), 411: (8, 3), 412: (7, 2), 413: (6, 1)
}
class Piece(object):
def __init__(self, name, position):
if round(name/100) == 1:
self.player = 1
if round(name/100) == 2:
self.player = 2
if round(name/100) == 3:
self.player = 3
if round(name/100) == 4:
self.player = 4
self.name = name
self.position = position
# NOTES : The variables are pair ex: legalMoves[x] has rangeLegalMoves[x], lenLegalMoves[x], etc
self.legalMoves = []
# delta = (yAfterMove - ybeforeMove, xAfterMove - xbeforeMove)
self.deltaLegalMoves = []
self.lenLegalMoves = []
self.pieceDirections = []
self.isAtDestination = False
def setPieceDirections(self, pieceDirections):
self.pieceDirections = pieceDirections
def clearLegalMoves(self):
self.legalMoves = []
self.deltaLegalMoves = []
self.lenLegalMoves = []
def updatePosition(self, decision):
if decision[0] != None:
self.position = decision[0][-1]
# print('UPDATE POSITION')
# print('PIECE ', self.name)
# print('NEW POSITION ', self.position)
def checkIsAtDestination(self):
y, x = self.rangeWithDestination
if y+x < 4:
self.isAtDestination = True
else:
self.isAtDestination = False
def calculateLenLegalMoves(self):
# print(self.legalMoves)
if self.legalMoves[0] != None:
for legalMove in self.legalMoves:
self.lenLegalMoves.append(len(legalMove[0]))
# print('LEN LEGAL MOVES', self.lenLegalMoves)
def calculateDeltaLegalMoves(self, destinationPoints):
if self.legalMoves[0] != None:
yDest, xDest = destinationPoints[self.player-1]
for legalMove in self.legalMoves:
yNew, xNew = legalMove[0][-1]
yOld, xOld = legalMove[1]
rangeNew = (abs(yNew-yDest), abs(xNew-xDest))
rangeOld = (abs(yOld-yDest), abs(xOld-xDest))
yDelta, xDelta = (rangeOld[0]-rangeNew[0], rangeOld[1]-rangeNew[1])
self.deltaLegalMoves.append((yDelta, xDelta))
# print('DELTA LEGAL MOVES', self.deltaLegalMoves)
def calculateRangeWithDestination(self, destinationPoints):
y, x = destinationPoints[self.player-1]
ypos, xpos = self.position
self.rangeWithDestination = (abs(y-ypos), abs(x-xpos))
# print("RANGE WITH DESTINATION", self.rangeWithDestination)
class PonderBoard(object):
def __init__(self):
self.piecesPosition = startPositions
self.realBoard = []
# FP -> Furthest Piece from corner destination (0,0) or (0,9) or (9,9) or (9,0)
self.FPName = 101
self.NPName = 113 # NP -> Nearest Piece from idem
self.rangeBetweenFPNP = (2, 2)
# Furthest / Nearest from Destination Point
self.furthestPiece = 0
self.nearestPiece = 0
def updateRealBoard(self, newBoard):
self.realBoard = newBoard
def isSquareInBoard(self, position):
y, x = position
if y > -1 and x > -1 and y < 10 and x < 10:
return True
else:
return False
def isPieceInTheSquare(self, position):
y, x = position
if self.realBoard[y][x] == 0:
return False
else:
return True
def getDirectionToTarget(self, Piece, target):
yDir = target[0] - Piece.position[0]
xDir = target[1] - Piece.position[1]
directions = []
if yDir > 0:
yDir = 1
elif yDir < 0:
yDir = -1
if xDir > 0:
xDir = 1
elif xDir < 0:
            xDir = -1
return (yDir, xDir)
def checkPieceGeserMoves(self, Piece):
y0, x0 = Piece.position
for pieceDirection in Piece.pieceDirections:
y, x = pieceDirection
if self.isSquareInBoard((y0+y, x0+x)):
if self.isPieceInTheSquare((y0+y, x0+x)) == False:
Piece.legalMoves.append([[(y0+y, x0+x)], (y0, x0), 0])
if len(Piece.legalMoves) == 0:
Piece.legalMoves = [None, None, 2]
def checkPieceJumpMoves(self, newPiecePosition, pieceDirections):
y0, x0 = newPiecePosition
jumpMoves = []
for pieceDirection in pieceDirections:
y, x = pieceDirection
if self.isSquareInBoard((y0+y, x0+x)) and self.isSquareInBoard((y0+y+y, x0+x+x)):
if self.isPieceInTheSquare((y0+y, x0+x)) and self.isPieceInTheSquare((y0+y+y, x0+x+x)) == False:
jumpMoves.append([(y0+y+y, x0+x+x)])
return jumpMoves
def checkPieceLegalMoves(self, Piece):
# print()
# print('-----', Piece.name, "-----")
# Check Jumps Moves
# First Loop
jumpMoves = self.checkPieceJumpMoves(Piece.position, Piece.pieceDirections)
# print('JUMPMOVES', jumpMoves)
# After First Loop
if len(jumpMoves) != 0:
isCheckAgain = True
else:
isCheckAgain = False
# print('IS CHECK AGAIN', isCheckAgain)
while isCheckAgain:
for jumpMove in jumpMoves:
oldMove = jumpMoves.pop(0)
analyzeMove = deepcopy(oldMove[-1])
# print('OLDMOVE', oldMove)
# print('ANALYZEMOVE', analyzeMove)
# print('JUMPMOVES', jumpMoves)
analyzeResults = self.checkPieceJumpMoves(analyzeMove, Piece.pieceDirections)
# print('ANALYZE RESULTS BEFORE', analyzeResults)
isCheckAgain = False
if len(analyzeResults) != 0:
for analyzeResult in analyzeResults:
if len(oldMove) > 1:
positionBeforeAnalyzeMove = oldMove[-2]
yBefore, xBefore = positionBeforeAnalyzeMove
# print('POSITION BEFORE ANALYZE MOVE', positionBeforeAnalyzeMove)
analyzeResult = analyzeResult[0]
# print('ANALYZE RESULTS AFTER', analyzeResult)
yAfter, xAfter = analyzeResult
if yAfter != yBefore or xAfter != xBefore:
newMove = deepcopy(oldMove)
newMove.append(analyzeResult)
# print('NEWMOVE', newMove)
jumpMoves.append(newMove)
isCheckAgain = True
else:
newMove = deepcopy(oldMove)
newMove.append(analyzeResult[0])
# print('NEWMOVE', newMove)
jumpMoves.append(newMove)
isCheckAgain = True
else:
jumpMoves.append(oldMove)
# print('IS CHECK AGAIN', isCheckAgain)
# print('JUMP MOVES', jumpMoves)
for jumpMove in jumpMoves:
Piece.legalMoves.append([jumpMove, Piece.position, 1])
# Check Geser Moves
y0, x0 = Piece.position
for pieceDirection in Piece.pieceDirections:
y, x = pieceDirection
if self.isSquareInBoard((y0+y, x0+x)):
if self.isPieceInTheSquare((y0+y, x0+x)) == False:
Piece.legalMoves.append([[(y0+y, x0+x)], (y0, x0), 0])
if len(Piece.legalMoves) == 0:
Piece.legalMoves = [None, None, 2]
def getLongestLenMove(self, Pieces):
longestLen = 0
thePieceIndex = 0
lenIndex = 0
for pieceIndex in range(len(Pieces)):
for pieceLenIndex in range(len(Pieces[pieceIndex].lenLegalMoves)):
if Pieces[pieceIndex].lenLegalMoves[pieceLenIndex] > longestLen:
longestLen = Pieces[pieceIndex].lenLegalMoves[pieceLenIndex]
thePieceIndex = pieceIndex
lenIndex = pieceLenIndex
# print('PIECE - LEN INDEX', thePieceIndex, lenIndex)
self.longestLenMovePieceIndex = thePieceIndex
self.longestLenMove = Pieces[thePieceIndex].legalMoves[lenIndex]
def getBiggestDeltaMove(self, Pieces):
biggestDelta = 0
thePieceIndex = 0
deltaIndex = 0
for pieceIndex in range(len(Pieces)):
for pieceDeltaIndex in range(len(Pieces[pieceIndex].deltaLegalMoves)):
y, x = Pieces[pieceIndex].deltaLegalMoves[pieceDeltaIndex]
if y+x > biggestDelta:
biggestDelta = y+x
thePieceIndex = pieceIndex
deltaIndex = pieceDeltaIndex
self.biggestDeltaMovePieceIndex = thePieceIndex
self.biggestDeltaMove = Pieces[thePieceIndex].legalMoves[deltaIndex]
self.biggestDelta = biggestDelta
# ANALYZE FUNCTION
def getFurthestNearestPiece(self, Pieces):
nearest = 18
furthest = 0
for pieceIndex in range(len(Pieces)):
y, x = Pieces[pieceIndex].rangeWithDestination
if abs(y+x) > furthest:
furthest = abs(y+x)
self.furthestPiece = Pieces[pieceIndex].name
if abs(y+x) < nearest:
nearest = abs(y+x)
self.nearestPiece = Pieces[pieceIndex].name
# print("NEAREST PIECE", self.nearestPiece)
# print("FURTHEST PIECE", self.furthestPiece)
def countPiecesAtDestination(self, Pieces):
self.sumPiecesAtDestination = 0
for Piece in Pieces:
if Piece.isAtDestination:
self.sumPiecesAtDestination += 1
# print('SUM PIECES AT DESTINATION = ', self.sumPiecesAtDestination)
## SPECIAL FUNCTION FOR ENDMODE
def getNearestDestination(self, Piece, Model, destinations):
nearest = 18
nearestDestination = (10, 10)
for destination in destinations:
# print('APA ISI INI ', destination, Model.getBidak(destination[0], destination[1]))
if Model.getBidak(destination[0], destination[1]) == 0 or round(Model.getBidak(destination[0], destination[1])/100) != Piece.player :
if abs(Piece.position[0]-destination[0])+abs(Piece.position[1]-destination[0]) < nearest:
nearest = abs(Piece.position[0]-destination[0])+abs(Piece.position[1]-destination[0])
nearestDestination = destination
# print("SELECTED NEAREST DESTINATION", nearestDestination)
# print("PIECE POSITION", Piece.position)
return nearestDestination
def getMoveFromDestination(self, Piece, destination, AIvariables):
direction = self.getDirectionToTarget(Piece, destination)
yPos, xPos = Piece.position
if self.isPieceInTheSquare((yPos+direction[0], xPos+direction[1])):
if self.isPieceInTheSquare((yPos+2*direction[0], xPos+2*direction[1])) == False:
return [(yPos+direction[0]*2, xPos+direction[1]*2)], Piece.position, 1
else:
return [(yPos+direction[0], xPos+direction[1])], Piece.position, 0
if self.isPieceInTheSquare((yPos, xPos+direction[1])):
if self.isPieceInTheSquare((yPos, xPos+2*direction[1])) == False:
return [(yPos, xPos+direction[1]*2)], Piece.position, 1
else:
return [(yPos, xPos+direction[1])], Piece.position, 0
if self.isPieceInTheSquare((yPos+direction[0], xPos)):
if self.isPieceInTheSquare((yPos+2*direction[0], xPos)) == False:
return [(yPos+direction[0]*2, xPos)], Piece.position, 1
else:
return [(yPos+direction[0], xPos)], Piece.position, 0
return None, None, 2
def moveDestinationPiece(self, nomor, nearestDestination, AIvariables):
directions = AIvariables.playersDirections[((nomor+2)%4)-1]
# print("THIS IS SAI ", nearestDestination[0]+direction[0]+direction[0], nearestDestination[1]+direction[1])
for direction in directions:
if self.isSquareInBoard((nearestDestination[0]+direction[0]+direction[0], nearestDestination[1]+direction[1]+direction[1])):
if self.realBoard[nearestDestination[0]+direction[0]+direction[0]][nearestDestination[1]+direction[1]+direction[1]]//100 == nomor and self.realBoard[nearestDestination[0]+direction[0]][nearestDestination[1]+direction[1]] != 0:
return [nearestDestination], (nearestDestination[0]+direction[0]+direction[0], nearestDestination[1]+direction[1]+direction[1]), 1
if self.isSquareInBoard((nearestDestination[0]+direction[0], nearestDestination[1]+direction[1])):
if self.realBoard[nearestDestination[0]+direction[0]][nearestDestination[1]+direction[1]]//100 == nomor:
return [nearestDestination], (nearestDestination[0]+direction[0], nearestDestination[1]+direction[1]), 0
return None, None, 2
class AIvariables(object):
def __init__(self):
##### DONE #####
self.piecesNames = []
name = []
for player in range(4):
for piece in range(13):
name.append((player+1)*100+piece+1)
self.piecesNames.append(name)
name = []
# piecePosition ada di halma_model
# FORMAT (vertical or y, horizontal or x) 1 = down or right, -1 = up or left
self.playersDirections = [
[(1, 1), (1, 0), (0, 1), (1, -1), (-1, 1)],
[(1, -1), (1, 0), (0, -1), (1, 1), (-1, -1)],
[(-1, -1), (-1, 0), (0, -1), (-1, 1), (1, -1)],
[(-1, 1), (-1, 0), (0, 1), (-1, -1), (1, 1)]
]
# TESTING
# self.playersDirections = [
# [(-1,-1), (0,-1), (1,-1), (-1,0), (1,0),(-1,1), (0,1), (1,1)],
# [(-1,-1), (0,-1), (1,-1), (-1,0), (1,0),(-1,1), (0,1), (1,1)],
# [(-1,-1), (0,-1), (1,-1), (-1,0), (1,0),(-1,1), (0,1), (1,1)],
# [(-1,-1), (0,-1), (1,-1), (-1,0), (1,0),(-1,1), (0,1), (1,1)]
# ]
self.finalDestinations1 = [[9, 9], [8, 9], [9, 8], [7, 9], [8, 8],
[9, 8], [6, 9], [7, 8], [8, 7], [9, 6],
[5, 9], [8, 8], [7, 7], [8, 6], [9, 5]]
self.finalDestinations2 = [[0, 0], [1, 0], [0, 1], [2, 0], [1, 1],
[0, 2], [3, 0], [2, 1], [1, 2], [0, 3],
[4, 0], [3, 1], [2, 2], [1, 3], [0, 4]]
# diagonal = y + x
self.diagonalDest1 = [18, 17, 16, 15, 14]
self.diagonalDest2 = [0, 1, 2, 3, 4]
self.destinationPoints = [(9,9), (9,0), (0,0), (0,9)]
self.threeDestinationPoints = [
[(8, 6), (7, 7), (6, 8)],
[(8, 3), (7, 2), (6, 1)],
[(1, 3), (2, 2), (3, 1)],
[(1, 6), (2, 7), (3, 8)]
] | [
"[email protected]"
] | |
5cb1a849d825eefe595062797868f3958e10eaba | 0139244a5561b7ecee26c1c4497f36bfc27c60dc | /deeplens/object_detection/tensorflow_detect/utils/np_box_list_ops_test.py | dffede6a42f564419134a5b2b4730c7c6d2a5dae | [
"MIT"
] | permissive | sjyk/deeplens-cv | a0e94d649fcac46225c517596dbd2cefff0144dc | 8383e6c377a93304534c46e3c9fd7821cc068d98 | refs/heads/master | 2022-11-27T10:59:55.806648 | 2021-02-22T19:41:09 | 2021-02-22T19:41:09 | 209,351,898 | 11 | 9 | MIT | 2022-11-21T21:38:23 | 2019-09-18T16:12:36 | Python | UTF-8 | Python | false | false | 16,928 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.track_utils.np_box_list_ops."""
import numpy as np
import tensorflow as tf
from object_detection.tensorflow_detect.utils import np_box_list
from object_detection.tensorflow_detect.utils import np_box_list_ops
class AreaRelatedTest(tf.test.TestCase):
def setUp(self):
boxes1 = np.array([[4.0, 3.0, 7.0, 5.0], [5.0, 6.0, 10.0, 7.0]],
dtype=float)
boxes2 = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
self.boxlist1 = np_box_list.BoxList(boxes1)
self.boxlist2 = np_box_list.BoxList(boxes2)
def test_area(self):
areas = np_box_list_ops.area(self.boxlist1)
expected_areas = np.array([6.0, 5.0], dtype=float)
self.assertAllClose(expected_areas, areas)
def test_intersection(self):
intersection = np_box_list_ops.intersection(self.boxlist1, self.boxlist2)
expected_intersection = np.array([[2.0, 0.0, 6.0], [1.0, 0.0, 5.0]],
dtype=float)
self.assertAllClose(intersection, expected_intersection)
def test_iou(self):
iou = np_box_list_ops.iou(self.boxlist1, self.boxlist2)
expected_iou = np.array([[2.0 / 16.0, 0.0, 6.0 / 400.0],
[1.0 / 16.0, 0.0, 5.0 / 400.0]],
dtype=float)
self.assertAllClose(iou, expected_iou)
def test_ioa(self):
boxlist1 = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=
np.float32))
boxlist2 = np_box_list.BoxList(
np.array(
[[0.5, 0.25, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]], dtype=np.float32))
ioa21 = np_box_list_ops.ioa(boxlist2, boxlist1)
expected_ioa21 = np.array([[0.5, 0.0],
[1.0, 1.0]],
dtype=np.float32)
self.assertAllClose(ioa21, expected_ioa21)
def test_scale(self):
boxlist = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=
np.float32))
boxlist_scaled = np_box_list_ops.scale(boxlist, 2.0, 3.0)
expected_boxlist_scaled = np_box_list.BoxList(
np.array(
[[0.5, 0.75, 1.5, 2.25], [0.0, 0.0, 1.0, 2.25]], dtype=np.float32))
self.assertAllClose(expected_boxlist_scaled.get(), boxlist_scaled.get())
def test_clip_to_window(self):
boxlist = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75],
[-0.2, -0.3, 0.7, 1.5]],
dtype=np.float32))
boxlist_clipped = np_box_list_ops.clip_to_window(boxlist,
[0.0, 0.0, 1.0, 1.0])
expected_boxlist_clipped = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75],
[0.0, 0.0, 0.7, 1.0]],
dtype=np.float32))
self.assertAllClose(expected_boxlist_clipped.get(), boxlist_clipped.get())
def test_prune_outside_window(self):
boxlist = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75],
[-0.2, -0.3, 0.7, 1.5]],
dtype=np.float32))
boxlist_pruned, _ = np_box_list_ops.prune_outside_window(
boxlist, [0.0, 0.0, 1.0, 1.0])
expected_boxlist_pruned = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=
np.float32))
self.assertAllClose(expected_boxlist_pruned.get(), boxlist_pruned.get())
def test_concatenate(self):
boxlist1 = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=
np.float32))
boxlist2 = np_box_list.BoxList(
np.array(
[[0.5, 0.25, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]], dtype=np.float32))
boxlists = [boxlist1, boxlist2]
boxlist_concatenated = np_box_list_ops.concatenate(boxlists)
boxlist_concatenated_expected = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75],
[0.5, 0.25, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]],
dtype=np.float32))
self.assertAllClose(boxlist_concatenated_expected.get(),
boxlist_concatenated.get())
def test_change_coordinate_frame(self):
boxlist = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=
np.float32))
boxlist_coord = np_box_list_ops.change_coordinate_frame(
boxlist, np.array([0, 0, 0.5, 0.5], dtype=np.float32))
expected_boxlist_coord = np_box_list.BoxList(
np.array([[0.5, 0.5, 1.5, 1.5], [0, 0, 1.0, 1.5]], dtype=np.float32))
self.assertAllClose(boxlist_coord.get(), expected_boxlist_coord.get())
def test_filter_scores_greater_than(self):
boxlist = np_box_list.BoxList(
np.array(
[[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=
np.float32))
boxlist.add_field('scores', np.array([0.8, 0.2], np.float32))
boxlist_greater = np_box_list_ops.filter_scores_greater_than(boxlist, 0.5)
expected_boxlist_greater = np_box_list.BoxList(
np.array([[0.25, 0.25, 0.75, 0.75]], dtype=np.float32))
self.assertAllClose(boxlist_greater.get(), expected_boxlist_greater.get())
class GatherOpsTest(tf.test.TestCase):
def setUp(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
self.boxlist = np_box_list.BoxList(boxes)
self.boxlist.add_field('scores', np.array([0.5, 0.7, 0.9], dtype=float))
self.boxlist.add_field('labels',
np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0],
[0, 0, 0, 0, 1]],
dtype=int))
def test_gather_with_out_of_range_indices(self):
indices = np.array([3, 1], dtype=int)
boxlist = self.boxlist
with self.assertRaises(ValueError):
np_box_list_ops.gather(boxlist, indices)
def test_gather_with_invalid_multidimensional_indices(self):
indices = np.array([[0, 1], [1, 2]], dtype=int)
boxlist = self.boxlist
with self.assertRaises(ValueError):
np_box_list_ops.gather(boxlist, indices)
def test_gather_without_fields_specified(self):
indices = np.array([2, 0, 1], dtype=int)
boxlist = self.boxlist
subboxlist = np_box_list_ops.gather(boxlist, indices)
expected_scores = np.array([0.9, 0.5, 0.7], dtype=float)
self.assertAllClose(expected_scores, subboxlist.get_field('scores'))
expected_boxes = np.array([[0.0, 0.0, 20.0, 20.0], [3.0, 4.0, 6.0, 8.0],
[14.0, 14.0, 15.0, 15.0]],
dtype=float)
self.assertAllClose(expected_boxes, subboxlist.get())
expected_labels = np.array([[0, 0, 0, 0, 1], [0, 0, 0, 1, 0],
[0, 1, 0, 0, 0]],
dtype=int)
self.assertAllClose(expected_labels, subboxlist.get_field('labels'))
def test_gather_with_invalid_field_specified(self):
indices = np.array([2, 0, 1], dtype=int)
boxlist = self.boxlist
with self.assertRaises(ValueError):
np_box_list_ops.gather(boxlist, indices, 'labels')
with self.assertRaises(ValueError):
np_box_list_ops.gather(boxlist, indices, ['objectness'])
def test_gather_with_fields_specified(self):
indices = np.array([2, 0, 1], dtype=int)
boxlist = self.boxlist
subboxlist = np_box_list_ops.gather(boxlist, indices, ['labels'])
self.assertFalse(subboxlist.has_field('scores'))
expected_boxes = np.array([[0.0, 0.0, 20.0, 20.0], [3.0, 4.0, 6.0, 8.0],
[14.0, 14.0, 15.0, 15.0]],
dtype=float)
self.assertAllClose(expected_boxes, subboxlist.get())
expected_labels = np.array([[0, 0, 0, 0, 1], [0, 0, 0, 1, 0],
[0, 1, 0, 0, 0]],
dtype=int)
self.assertAllClose(expected_labels, subboxlist.get_field('labels'))
class SortByFieldTest(tf.test.TestCase):
def setUp(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
self.boxlist = np_box_list.BoxList(boxes)
self.boxlist.add_field('scores', np.array([0.5, 0.9, 0.4], dtype=float))
self.boxlist.add_field('labels',
np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0],
[0, 0, 0, 0, 1]],
dtype=int))
def test_with_invalid_field(self):
with self.assertRaises(ValueError):
np_box_list_ops.sort_by_field(self.boxlist, 'objectness')
with self.assertRaises(ValueError):
np_box_list_ops.sort_by_field(self.boxlist, 'labels')
def test_with_invalid_sorting_order(self):
with self.assertRaises(ValueError):
np_box_list_ops.sort_by_field(self.boxlist, 'scores', 'Descending')
def test_with_descending_sorting(self):
sorted_boxlist = np_box_list_ops.sort_by_field(self.boxlist, 'scores')
expected_boxes = np.array([[14.0, 14.0, 15.0, 15.0], [3.0, 4.0, 6.0, 8.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
self.assertAllClose(expected_boxes, sorted_boxlist.get())
expected_scores = np.array([0.9, 0.5, 0.4], dtype=float)
self.assertAllClose(expected_scores, sorted_boxlist.get_field('scores'))
def test_with_ascending_sorting(self):
sorted_boxlist = np_box_list_ops.sort_by_field(
self.boxlist, 'scores', np_box_list_ops.SortOrder.ASCEND)
expected_boxes = np.array([[0.0, 0.0, 20.0, 20.0],
[3.0, 4.0, 6.0, 8.0],
[14.0, 14.0, 15.0, 15.0],],
dtype=float)
self.assertAllClose(expected_boxes, sorted_boxlist.get())
expected_scores = np.array([0.4, 0.5, 0.9], dtype=float)
self.assertAllClose(expected_scores, sorted_boxlist.get_field('scores'))
class NonMaximumSuppressionTest(tf.test.TestCase):
def setUp(self):
self._boxes = np.array([[0, 0, 1, 1],
[0, 0.1, 1, 1.1],
[0, -0.1, 1, 0.9],
[0, 10, 1, 11],
[0, 10.1, 1, 11.1],
[0, 100, 1, 101]],
dtype=float)
self._boxlist = np_box_list.BoxList(self._boxes)
def test_with_no_scores_field(self):
boxlist = np_box_list.BoxList(self._boxes)
max_output_size = 3
iou_threshold = 0.5
with self.assertRaises(ValueError):
np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
def test_nms_disabled_max_output_size_equals_three(self):
boxlist = np_box_list.BoxList(self._boxes)
boxlist.add_field('scores',
np.array([.9, .75, .6, .95, .2, .3], dtype=float))
max_output_size = 3
iou_threshold = 1. # No NMS
expected_boxes = np.array([[0, 10, 1, 11], [0, 0, 1, 1], [0, 0.1, 1, 1.1]],
dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_select_from_three_clusters(self):
boxlist = np_box_list.BoxList(self._boxes)
boxlist.add_field('scores',
np.array([.9, .75, .6, .95, .2, .3], dtype=float))
max_output_size = 3
iou_threshold = 0.5
expected_boxes = np.array([[0, 10, 1, 11], [0, 0, 1, 1], [0, 100, 1, 101]],
dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_select_at_most_two_from_three_clusters(self):
boxlist = np_box_list.BoxList(self._boxes)
boxlist.add_field('scores',
np.array([.9, .75, .6, .95, .5, .3], dtype=float))
max_output_size = 2
iou_threshold = 0.5
expected_boxes = np.array([[0, 10, 1, 11], [0, 0, 1, 1]], dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_select_at_most_thirty_from_three_clusters(self):
boxlist = np_box_list.BoxList(self._boxes)
boxlist.add_field('scores',
np.array([.9, .75, .6, .95, .5, .3], dtype=float))
max_output_size = 30
iou_threshold = 0.5
expected_boxes = np.array([[0, 10, 1, 11], [0, 0, 1, 1], [0, 100, 1, 101]],
dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
  def test_select_from_ten_identical_boxes(self):
boxes = np.array(10 * [[0, 0, 1, 1]], dtype=float)
boxlist = np_box_list.BoxList(boxes)
boxlist.add_field('scores', np.array(10 * [0.8]))
iou_threshold = .5
max_output_size = 3
expected_boxes = np.array([[0, 0, 1, 1]], dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_different_iou_threshold(self):
boxes = np.array([[0, 0, 20, 100], [0, 0, 20, 80], [200, 200, 210, 300],
[200, 200, 210, 250]],
dtype=float)
boxlist = np_box_list.BoxList(boxes)
boxlist.add_field('scores', np.array([0.9, 0.8, 0.7, 0.6]))
max_output_size = 4
iou_threshold = .4
expected_boxes = np.array([[0, 0, 20, 100],
[200, 200, 210, 300],],
dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
iou_threshold = .5
expected_boxes = np.array([[0, 0, 20, 100], [200, 200, 210, 300],
[200, 200, 210, 250]],
dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
iou_threshold = .8
expected_boxes = np.array([[0, 0, 20, 100], [0, 0, 20, 80],
[200, 200, 210, 300], [200, 200, 210, 250]],
dtype=float)
nms_boxlist = np_box_list_ops.non_max_suppression(
boxlist, max_output_size, iou_threshold)
self.assertAllClose(nms_boxlist.get(), expected_boxes)
def test_multiclass_nms(self):
boxlist = np_box_list.BoxList(
np.array(
[[0.2, 0.4, 0.8, 0.8], [0.4, 0.2, 0.8, 0.8], [0.6, 0.0, 1.0, 1.0]],
dtype=np.float32))
scores = np.array([[-0.2, 0.1, 0.5, -0.4, 0.3],
[0.7, -0.7, 0.6, 0.2, -0.9],
[0.4, 0.34, -0.9, 0.2, 0.31]],
dtype=np.float32)
boxlist.add_field('scores', scores)
boxlist_clean = np_box_list_ops.multi_class_non_max_suppression(
boxlist, score_thresh=0.25, iou_thresh=0.1, max_output_size=3)
scores_clean = boxlist_clean.get_field('scores')
classes_clean = boxlist_clean.get_field('classes')
boxes = boxlist_clean.get()
expected_scores = np.array([0.7, 0.6, 0.34, 0.31])
expected_classes = np.array([0, 2, 1, 4])
expected_boxes = np.array([[0.4, 0.2, 0.8, 0.8],
[0.4, 0.2, 0.8, 0.8],
[0.6, 0.0, 1.0, 1.0],
[0.6, 0.0, 1.0, 1.0]],
dtype=np.float32)
self.assertAllClose(scores_clean, expected_scores)
self.assertAllClose(classes_clean, expected_classes)
self.assertAllClose(boxes, expected_boxes)
if __name__ == '__main__':
tf.test.main()
| [
"[email protected]"
] | |
66c5ff6582c9ed9bc4af5f1ff227ef25c5f8b9ca | 35d1b988b4ea391ed648f46ec00c13e9ab6cd9d8 | /salt/modules/ssh.py | fd67e49953513f45222220088b57fe3bb4a07a5c | [
"Apache-2.0"
] | permissive | atoponce/salt | 2ceb247433e6d7b8401a9e3c501ea75e89f798b2 | 19764c5b78820f81cdb8f7f429feb67a8859b692 | refs/heads/master | 2021-01-18T07:24:27.763822 | 2011-10-04T03:46:11 | 2011-10-04T03:46:11 | 2,509,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,099 | py | '''
Manage client ssh components
'''
import os
def _refine_enc(enc):
'''
Return the properly formatted ssh value for the authorized encryption key
type. If the type is not found, return ssh-rsa, the ssh default.
'''
rsa = ['r', 'rsa', 'ssh-rsa']
dss = ['d', 'dsa', 'dss', 'ssh-dss']
if rsa.count(enc):
return 'ssh-rsa'
elif dss.count(enc):
return 'ssh-dss'
else:
return 'ssh-rsa'
def _format_auth_line(
key,
enc,
comment,
options):
line = ''
if options:
line += '{0} '.format(','.join(options))
line += '{0} {1} {2}'.format(enc, key, comment)
return line
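# Illustrative results (added note, not part of the original module; the key
# material is a shortened placeholder):
#   _format_auth_line('AAAAB3Nza...', 'ssh-rsa', 'user@host', ['no-pty'])
#       -> 'no-pty ssh-rsa AAAAB3Nza... user@host'
#   _format_auth_line('AAAAB3Nza...', 'ssh-rsa', 'user@host', [])
#       -> 'ssh-rsa AAAAB3Nza... user@host'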
def _replace_auth_key(
user,
key,
enc='ssh-rsa',
comment='',
options=[],
config='.ssh/authorized_keys'):
'''
Replace an existing key
'''
auth_line = _format_auth_line(
key,
enc,
comment,
options)
lines = []
uinfo = __salt__['user.info'](user)
full = os.path.join(uinfo['home'], config)
for line in open(full, 'r').readlines():
if line.startswith('#'):
# Commented Line
lines.append(line)
continue
comps = line.split()
if len(comps) < 2:
# Not a valid line
lines.append(line)
continue
key_ind = 1
if not comps[0].startswith('ssh-'):
key_ind = 2
if comps[key_ind] == key:
lines.append(auth_line)
else:
lines.append(line)
open(full, 'w+').writelines(lines)
def host_keys(keydir=None):
'''
Return the minion's host keys
CLI Example:
salt '*' ssh.host_keys
'''
# Set up the default keydir - needs to support sshd_config parsing in the
# future
if not keydir:
if __grains__['Linux']:
keydir = '/etc/ssh'
keys = {}
for fn_ in os.listdir(keydir):
if fn_.startswith('ssh_host_'):
top = fn_.split('.')
comps = fn_.split('_')
kname = comps[2]
if len(top) > 1:
kname += '.{0}'.format(top[1])
try:
keys[kname] = open(os.path.join(keydir, fn_), 'r').read()
except:
keys[kname] = ''
return keys
def auth_keys(user, config='.ssh/authorized_keys'):
'''
Return the authorized keys for the specified user
CLI Example:
salt '*' ssh.auth_keys root
'''
ret = {}
uinfo = __salt__['user.info'](user)
full = os.path.join(uinfo['home'], config)
if not os.path.isfile(full):
return {}
for line in open(full, 'r').readlines():
if line.startswith('#'):
# Commented Line
continue
comps = line.split()
if len(comps) < 2:
# Not a valid line
continue
if not comps[0].startswith('ssh-'):
# It has options, grab them
options = comps[0].split(',')
else:
options = []
if not options:
enc = comps[0]
key = comps[1]
comment = ' '.join(comps[2:])
else:
enc = comps[1]
key = comps[2]
comment = ' '.join(comps[3:])
ret[key] = {'enc': enc,
'comment': comment,
'options': options}
return ret
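# Example authorized_keys entries handled by auth_keys (added note, not part
# of the original module; key material is a shortened placeholder):
#   ssh-rsa AAAAB3NzaC1yc2E... user@host
#   no-pty,command="/bin/true" ssh-rsa AAAAB3NzaC1yc2E... automation
# The first line parses with options=[], the second with
# options=['no-pty', 'command="/bin/true"'].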
def rm_auth_key(user, key, config='.ssh/authorized_keys'):
'''
Remove an authorized key from the specified user's authorized key file
CLI Example:
salt '*' ssh.rm_auth_key <user> <key>
'''
current = auth_keys(user, config)
if current.has_key(key):
# Remove the key
uinfo = __salt__['user.info'](user)
full = os.path.join(uinfo['home'], config)
if not os.path.isfile(full):
return 'User authorized keys file not present'
lines = []
for line in open(full, 'r').readlines():
if line.startswith('#'):
# Commented Line
lines.append(line)
continue
comps = line.split()
if len(comps) < 2:
# Not a valid line
lines.append(line)
continue
if not comps[0].startswith('ssh-'):
# It has options, grab them
options = comps[0].split(',')
else:
options = []
if not options:
pkey = comps[1]
else:
pkey = comps[2]
if pkey == key:
continue
else:
lines.append(line)
open(full, 'w+').writelines(lines)
return 'Key removed'
return 'Key not present'
def set_auth_key(
user,
key,
enc='ssh-rsa',
comment='',
options=[],
config='.ssh/authorized_keys'):
'''
Add a key to the authorized_keys file
CLI Example:
salt '*' ssh.set_auth_key <user> <key> dsa '[]' .ssh/authorized_keys
'''
enc = _refine_enc(enc)
ret = ''
replace = False
uinfo = __salt__['user.info'](user)
current = auth_keys(user, config)
if current.has_key(key):
        if not set(current[key]['options']) == set(options):
            replace = True
        if not current[key]['enc'] == enc:
            replace = True
        if not current[key]['comment'] == comment:
if comment:
replace = True
if replace:
_replace_auth_key(
user,
key,
enc,
comment,
options,
config)
return 'replace'
else:
return 'no change'
else:
auth_line = _format_auth_line(
key,
enc,
comment,
options)
open(
os.path.join(uinfo['home'], config), 'a+').write(
'\n{0}'.format(auth_line))
return 'new'
| [
"[email protected]"
] | |
dfc5a128780d692bffb8e8c4f0479adb3a7df600 | d20184bce93f6d4da8e4e1b430a0d9828acc7459 | /tensorflow_datasets/core/features/image_feature_test.py | 053951347dc5ef224d171666ddbcf97f01f57fde | [
"Apache-2.0"
] | permissive | abiraja2004/datasets | 0bf6da2aaf86f332b54b1bcc0bfbd58eea4b2a7b | 13b5287be1c400563d559384bd8e6d4d0244ba85 | refs/heads/master | 2020-04-05T12:17:02.018945 | 2018-11-09T11:51:19 | 2018-11-09T11:51:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,983 | py | # coding=utf-8
# Copyright 2018 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_datasets.core.features.image_feature."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
from tensorflow_datasets.core import features
from tensorflow_datasets.core import test_utils
class ImageFeatureTest(tf.test.TestCase):
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def test_encode_decode(self):
specs = features.SpecDict({
'img': features.Image(),
'img_shaped': features.Image(shape=(32, 64, 3)),
'img_file': features.Image(),
})
img = np.random.randint(256, size=(128, 100, 3), dtype=np.uint8)
img_shaped = np.random.randint(256, size=(32, 64, 3), dtype=np.uint8)
img_file_path = os.path.join(os.path.dirname(__file__),
'../test_data/6pixels.png')
img_file_expected_content = [ # see tests_data/README.md
[[0, 255, 0], [255, 0, 0], [255, 0, 255]],
[[0, 0, 255], [255, 255, 0], [126, 127, 128]],
]
decoded_sample = test_utils.features_encode_decode(specs, {
'img': img,
'img_shaped': img_shaped,
'img_file': img_file_path,
})
self.assertAllEqual(decoded_sample['img'], img)
self.assertAllEqual(decoded_sample['img_shaped'], img_shaped)
self.assertAllEqual(decoded_sample['img_file'], img_file_expected_content)
# 'img' shape can be dynamic
img2 = np.random.randint(256, size=(64, 200, 3), dtype=np.uint8)
decoded_sample = test_utils.features_encode_decode(specs, {
'img': img2,
'img_shaped': img_shaped,
'img_file': img_file_path,
})
self.assertAllEqual(decoded_sample['img'], img2)
# 'img_shaped' shape should be static
img_shaped2 = np.random.randint(256, size=(31, 64, 3), dtype=np.uint8)
with self.assertRaisesWithPredicateMatch(ValueError, 'are incompatible'):
test_utils.features_encode_decode(specs, {
'img': img2,
'img_shaped': img_shaped2,
'img_file': img_file_path,
})
@tf.contrib.eager.run_test_in_graph_and_eager_modes()
def test_wrong_input(self):
specs = features.SpecDict({
'img': features.Image(),
})
# Correct shape/type should succeed
test_utils.features_encode_decode(specs, {
'img': np.random.randint(256, size=(128, 128, 3), dtype=np.uint8),
})
test_utils.features_encode_decode(specs, {
'img': np.random.randint(256, size=(64, 64, 3), dtype=np.uint8),
})
# Invalid type
with self.assertRaisesWithPredicateMatch(ValueError, 'should be uint8'):
test_utils.features_encode_decode(specs, {
'img': np.random.randint(256, size=(128, 128, 3), dtype=np.uint32),
})
# Invalid number of dimensions
with self.assertRaisesWithPredicateMatch(ValueError,
'must have the same rank'):
test_utils.features_encode_decode(specs, {
'img': np.random.randint(256, size=(128, 128), dtype=np.uint8),
})
# Invalid number of channels
with self.assertRaisesWithPredicateMatch(ValueError, 'are incompatible'):
test_utils.features_encode_decode(specs, {
'img': np.random.randint(256, size=(128, 128, 1), dtype=np.uint8),
})
if __name__ == '__main__':
tf.test.main()
| [
"[email protected]"
] | |
c648b82f5a7fde552e18e089edae3470823c484a | 2a9e31f2dc50474cc33ea85a2b4dcb23b3a8b737 | /raven/utils/stacks.py | 57030b90849632bc56bab087fe41faf7aaf6a7fd | [
"BSD-3-Clause"
] | permissive | mitsuhiko/raven | 39ce3971adc90da5544251a58baac63fbf031c8a | 3d9bd01e2881f58c7f9156c1fd243569547840ed | refs/heads/master | 2023-06-08T17:51:57.663446 | 2011-10-12T01:47:04 | 2011-10-12T01:47:04 | 2,559,563 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,505 | py | """
raven.utils.stacks
~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import inspect
import re
from raven.utils.encoding import transform
_coding_re = re.compile(r'coding[:=]\s*([-\w.]+)')
def get_lines_from_file(filename, lineno, context_lines, loader=None, module_name=None):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
source = None
if loader is not None and hasattr(loader, "get_source"):
try:
source = loader.get_source(module_name)
except ImportError:
# Traceback (most recent call last):
# File "/Users/dcramer/Development/django-sentry/sentry/client/handlers.py", line 31, in emit
# get_client().create_from_record(record, request=request)
# File "/Users/dcramer/Development/django-sentry/sentry/client/base.py", line 325, in create_from_record
# data['__sentry__']['frames'] = varmap(shorten, get_stack_info(stack))
# File "/Users/dcramer/Development/django-sentry/sentry/utils/stacks.py", line 112, in get_stack_info
# pre_context_lineno, pre_context, context_line, post_context = get_lines_from_file(filename, lineno, 7, loader, module_name)
# File "/Users/dcramer/Development/django-sentry/sentry/utils/stacks.py", line 24, in get_lines_from_file
# source = loader.get_source(module_name)
# File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/pkgutil.py", line 287, in get_source
# fullname = self._fix_name(fullname)
# File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/pkgutil.py", line 262, in _fix_name
# "module %s" % (self.fullname, fullname))
# ImportError: Loader for module cProfile cannot handle module __main__
source = None
if source is not None:
source = source.splitlines()
if source is None:
try:
f = open(filename)
try:
source = f.readlines()
finally:
f.close()
except (OSError, IOError):
pass
if source is None:
return None, [], None, []
encoding = 'ascii'
for line in source[:2]:
# File coding may be specified. Match pattern from PEP-263
# (http://www.python.org/dev/peps/pep-0263/)
match = _coding_re.search(line)
if match:
encoding = match.group(1)
break
source = [unicode(sline, encoding, 'replace') for sline in source]
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
pre_context = [line.strip('\n') for line in source[lower_bound:lineno]]
context_line = source[lineno].strip('\n')
post_context = [line.strip('\n') for line in source[lineno+1:upper_bound]]
return lower_bound, pre_context, context_line, post_context
def get_culprit(frames, include_paths=[], exclude_paths=[]):
# We iterate through each frame looking for a deterministic culprit
# When one is found, we mark it as last "best guess" (best_guess) and then
# check it against ``exclude_paths``. If it isnt listed, then we
# use this option. If nothing is found, we use the "best guess".
best_guess = None
culprit = None
for frame in frames:
try:
culprit = '.'.join([frame['module'], frame['function']])
except KeyError:
continue
if any((culprit.startswith(k) for k in include_paths)):
if not (best_guess and any((culprit.startswith(k) for k in exclude_paths))):
best_guess = culprit
elif best_guess:
break
# Return either the best guess or the last frames call
return best_guess or culprit
def iter_traceback_frames(tb):
while tb:
# support for __traceback_hide__ which is used by a few libraries
# to hide internal frames.
if not tb.tb_frame.f_locals.get('__traceback_hide__'):
yield tb.tb_frame
tb = tb.tb_next
def iter_stack_frames():
for frame_crud in inspect.stack()[1:]:
yield frame_crud[0]
def get_stack_info(frames):
results = []
for frame in frames:
# Support hidden frames
if frame.f_locals.get('__traceback_hide__'):
continue
filename = frame.f_code.co_filename
function = frame.f_code.co_name
lineno = frame.f_lineno - 1
loader = frame.f_globals.get('__loader__')
module_name = frame.f_globals.get('__name__')
pre_context_lineno, pre_context, context_line, post_context = get_lines_from_file(filename, lineno, 7, loader, module_name)
if pre_context_lineno is not None:
results.append({
'id': id(frame),
'filename': filename,
'module': module_name,
'function': function,
'lineno': lineno + 1,
# TODO: vars need to be references
'vars': transform(frame.f_locals.items()),
'pre_context': pre_context,
'context_line': context_line,
'post_context': post_context,
'pre_context_lineno': pre_context_lineno + 1,
})
return results
| [
"[email protected]"
] | |
43fe0f6922d3ef40f54bcd87b20dd121739b4deb | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/KISS/testcase/firstcases/testcase9_020.py | 03e1caf70102b771e968fc164b13a7e026557106 | [] | no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,110 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'fr.neamar.kiss',
'appActivity' : 'fr.neamar.kiss.MainActivity',
'resetKeyboard' : True,
'androidCoverage' : 'fr.neamar.kiss/fr.neamar.kiss.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
return
# testcase020
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememt(driver, "new UiSelector().resourceId(\"fr.neamar.kiss:id/launcherButton\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
swipe(driver, 0.5, 0.8, 0.5, 0.2)
element = getElememt(driver, "new UiSelector().resourceId(\"fr.neamar.kiss:id/item_app_icon\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"9_020\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'fr.neamar.kiss'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage) | [
"[email protected]"
] | |
bc918e41ae9466ad97805350c85c83c738ad612c | b3e525a3c48800303019adac8f9079109c88004e | /iota/test/iris/testcases/swm/mode_switch.py | 2163fc162470d0451dfa579e13746f89f9d2aa70 | [] | no_license | PsymonLi/sw | d272aee23bf66ebb1143785d6cb5e6fa3927f784 | 3890a88283a4a4b4f7488f0f79698445c814ee81 | refs/heads/master | 2022-12-16T21:04:26.379534 | 2020-08-27T07:57:22 | 2020-08-28T01:15:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,212 | py | #! /usr/bin/python3
import paramiko
import logging
import sys
import time
import traceback
import iota.harness.api as api
from iota.harness.thirdparty.redfish import redfish_client
from iota.test.utils.redfish.ping import ping
from iota.test.utils.redfish.ncsi_ops import check_set_ncsi
from iota.test.utils.redfish.nic_ops import get_nic_mode
from iota.test.utils.redfish.ncsi_ops import set_ncsi_mode
from iota.test.utils.redfish.dedicated_mode_ops import set_dedicated_mode
from iota.test.utils.redfish.common import get_redfish_obj
def Setup(tc):
naples_nodes = api.GetNaplesNodes()
if len(naples_nodes) == 0:
api.Logger.error("No naples node found")
return api.types.status.ERROR
tc.test_node = naples_nodes[0]
tc.node_name = tc.test_node.Name()
cimc_info = tc.test_node.GetCimcInfo()
if not cimc_info:
api.Logger.error("CimcInfo is None, exiting")
return api.types.status.ERROR
tc.ilo_ip = cimc_info.GetIp()
tc.ilo_ncsi_ip = cimc_info.GetNcsiIp()
tc.cimc_info = cimc_info
if ping(tc.ilo_ip, 3) == api.types.status.SUCCESS:
tc.initial_mode = "dedicated"
elif ping(tc.ilo_ncsi_ip, 3) == api.types.status.SUCCESS:
tc.initial_mode = "ncsi"
else:
api.Logger.error('ILO unreachable')
return api.types.status.FAILURE
return api.types.status.SUCCESS
def Trigger(tc):
max_pings = int(getattr(tc.args, "max_pings", 60))
mode = tc.initial_mode
try:
for _i in range(tc.iterators.count):
RF = get_redfish_obj(tc.cimc_info, mode=mode)
obs_mode = get_nic_mode(RF)
api.Logger.info("Iteration %d: curr_mode %s" % (_i, obs_mode))
if mode != obs_mode:
raise RuntimeError("Expected NIC mode %s, observed %s" % (mode, obs_mode))
next_mode = "dedicated" if mode == "ncsi" else "ncsi"
if next_mode == "ncsi":
ret = set_ncsi_mode(RF, mode="dhcp")
else:
ret = set_dedicated_mode(RF, mode="dhcp")
if ret != api.types.status.SUCCESS:
api.Logger.error("Mode switch from %s -> %s failed" %(mode, next_mode))
return api.types.status.FAILURE
api.Logger.info("Switched mode to %s" % (next_mode))
time.sleep(5)
if ret == api.types.status.SUCCESS:
curr_ilo_ip = tc.ilo_ip if next_mode == "dedicated" else tc.ilo_ncsi_ip
ret = ping(curr_ilo_ip, max_pings)
if ret != api.types.status.SUCCESS:
RF.logout()
raise RuntimeError('Unable to ping ILO, Port Switch fail from'
' %s -> %s' % (mode, next_mode))
api.Logger.info("Mode switch from %s -> %s successful" % (mode, next_mode))
else:
raise RuntimeError('Mode switch config failed')
mode = next_mode
except:
api.Logger.error(traceback.format_exc())
return api.types.status.FAILURE
return api.types.status.SUCCESS
def Verify(tc):
return api.types.status.SUCCESS
def Teardown(tc):
return api.types.status.SUCCESS
| [
"[email protected]"
] | |
60cd5aa547cedf3c4161dfe2689d0b25e1907946 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /nm8zFcqcQ9Rzu45Fm_8.py | 6f23ec4930e6f866f2cf4c94d2f9d47afbdc03e8 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py |
def bridge_shuffle(lst1, lst2):
suffled_list, i = [], 0
while True:
if i >= len(lst1):
return suffled_list + lst2[i:]
if i >= len(lst2):
return suffled_list + lst1[i:]
suffled_list += [lst1[i]] + [lst2[i]]
i += 1
| [
"[email protected]"
] | |
b9760bc5d28254cf3a26ee63a1685d0d8051e09d | 00f1d6f2998e404deb42c43b3b03c8089b263568 | /CCV4-adv/week4/lanhuajian/loss.py | 05ed3934181726fa4c5cd98aa89fed59991ec0d3 | [] | no_license | xiashiwendao/kaikeba | 430bdfa4a0b99700fcbe76fce9118791917ade13 | 986f72e3f4eb9d210ebf6b46d9cace6c24353865 | refs/heads/master | 2022-12-26T15:41:06.842476 | 2020-10-10T06:20:47 | 2020-10-10T06:20:47 | 293,436,997 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | import torch
from torch import nn
# Binary-classification focal loss
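# The loss implemented below follows the binary focal loss of Lin et al. 2017:
#   FL = -alpha * (1 - pt)**gamma * y * log(pt)
#        - (1 - alpha) * pt**gamma * (1 - y) * log(1 - pt)
# Usage sketch (added note; it assumes `pt` already holds sigmoid
# probabilities and `target` holds 0/1 labels of the same shape):
#   criterion = BCEFocalLoss(gamma=2, alpha=0.25)
#   loss = criterion(torch.sigmoid(logits), labels.float())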
class BCEFocalLoss(nn.Module):
def __init__(self, gamma=2, alpha=0.25, reduction="elementwise_mean"):
super().__init__()
self.gamma = gamma
self.alpha = alpha
self.reduction = reduction
def forward(self, pt, target):
alpha = self.alpha
loss = - alpha * (1 - pt) ** self.gamma * target * torch.log(pt) - \
(1 - alpha) * pt ** self.gamma * (1 - target) * torch.log(1 - pt)
if self.reduction == "elementwise_mean":
loss = torch.mean(loss)
elif self.reduction == "sum":
loss = torch.sum(loss)
return loss | [
"[email protected]"
] | |
3189a9bddb5e0869d4c2a3fd33df5916788f3680 | b403c7fe56209472855dff451f0b6283d5471008 | /Supplemental_Material/PythonProjects/13. PYGAME/Pygames/pygame1.py | cb258cdafed5b51868ddb1c79da280eb4fc9e0e4 | [] | no_license | Sandbox4KidsTM/Python_Basics | 842bde52796896e913fdb5cc349034c52092555f | 68c95547ec1567958fc8069e6a4bb119e436211a | refs/heads/master | 2020-03-23T01:06:29.363196 | 2018-08-10T04:32:58 | 2018-08-10T04:32:58 | 140,901,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,079 | py | #sentdex YouTube Video Tutorial Series
#first pygame with basic window
import pygame #import pygame module
pygame.init() #start pygame #pygame is an instance, hence need to instantiate it
gameDisplay = pygame.display.set_mode((800, 600)) #resolution of the game window
#set_mode() expects a tuple, not 2 parameters, hence (800,600) is a tuple
pygame.display.set_caption('A racing game') #title of the game window
clock = pygame.time.Clock() #clock for game
crashed = False
while not crashed:
for event in pygame.event.get(): #gets all the events per frame per second
if event.type == pygame.QUIT:
crashed = True #break out of the while loop
print(event) #print all the events in the console
    pygame.display.update() #update the screen #updates just the current window
# alternate is pygame.display.flip() #updates the entire surface (all windows)
    clock.tick(60) #how fast do you want to display in frames per second
pygame.quit() #end pygame
quit()
| [
"[email protected]"
] | |
a317c7c7a2454ad9c8e3d5914e0c7b875ea45f70 | 13a4df75e81ee4330a197340a300ec0755247a93 | /aKindOfQuestion/5.arithmeticRobot.py | 1b2226136d037872506976d20fe89461be50c385 | [] | no_license | ltfafei/py_Leetcode_study | d22955380bf9f134bc9cb215fea73ec4f9ea94cf | 0fd1bca56a621001cf9093f60941c4bfed4c79a5 | refs/heads/master | 2023-07-13T18:15:59.098314 | 2021-08-30T15:11:17 | 2021-08-30T15:11:17 | 363,597,757 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | #!/usr/bin/python3
#-*- coding: UTF-8 -*-
#Author: afei00123
'''
5. Arithmetic robot
Use a program to build a simple arithmetic robot: enter two numbers, then enter an instruction string, and the robot computes and returns the result.
The input values can be any integers; call them X and Y. The instructions are A and B. Instruction A performs the operation:
X*2+Y
Instruction B performs the operation:
Y*2+X
If the instruction string is "AB", instruction A is executed first, then instruction B, and the final result is returned.
'''
def arithRobot(x, y, s):
n = 0
for c in s:
if c == "A":
n += 2 * x + y
else:
n += 2 * y + x
return n
print(arithRobot(3, 9, "AB"))
'''
Output result:
36
''' | [
"[email protected]"
] | |
f8b84d578eefdacabcc837f5e63de75ee534d5f2 | b8273daac56c6bb1e2cf5fba8bfe000a27f9ae6f | /src/optimal_root_model.py | 0d0019148c5c0ee839f3f67bd6a304759dcd96fd | [] | no_license | jgomezdans/GDAY | 0d117fc5bd27f2b3356192df22cb683970975711 | 66fea12aa4d454858be137530acd230d5e2834bf | refs/heads/master | 2021-01-24T01:22:33.973009 | 2013-02-14T04:53:10 | 2013-02-14T04:53:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,831 | py | """ Really need to fix these units so that there are consistent with G'DAY!!!"""
from math import exp
from utilities import float_ne
__author__ = "Martin De Kauwe"
__version__ = "1.0 (26.10.2012)"
__email__ = "[email protected]"
class RootingDepthModel(object):
""" Ross's Optimal rooting depth model.
Optimisation hypothesis = maximise total N uptake by determining optimal
distribution of root mass with soil depth and optimal root depth
References:
----------
* McMurtire, R. et al (2011) Increased nitrogen-uptake efficiency at
elevated CO2 explained by an hypothesis of optimal root function. Ecology
and Evolution, 2, 1235--1250
"""
def __init__(self, d0, r0, top_soil_depth):
"""
Parameters:
-----------
d0 : float
Length scale for exponential decline of Umax(z)
r0 : float
root C at half-maximum N uptake (kg C/m3)
top_soil_depth : float
depth of soil assumed by G'DAY, note Ross comment about 30 cm
[email]
"""
self.d0 = d0
self.r0 = r0
self.top_soil_depth = top_soil_depth
def main(self, rtoti=None, nsupply=None, depth_guess=None):
"""
Parameters:
-----------
rtoti : float
Initial fine root C mass -> from G'DAY [kg m-2] -> paper says DM?!
depth_guess : float
Initial guess at the rooting depth, used as the first point in the
root depth optimisation scheme [m].
Returns:
--------
root_depth : float
rooting depth [m]
nuptake : float
N uptake from roots [gN m-2 yr-1]
rabove : float
"""
# step 2: determine maximum rooting depth for model for a value rtoti
# from G'DAY
root_depth = self.estimate_max_root_depth(rtoti, depth_guess)
# step 6: calculate plant N uptake -> eqn B8.
nuptake = self.calc_plant_nuptake(root_depth, nsupply)
# step 7: daily root litter calculation. G'DAY requires root litter
# input to the top 30 cm of soil, so the G'DAY needs changing. So
# mortality below 30 cm should be ignored. Ross assumes that root
# longevity and root N:C are independent of depth, thus avoiding the
# integrals
rabove = self.calculate_root_mass_above_depth(rtoti, root_depth)
return (root_depth, nuptake, rabove)
def estimate_max_root_depth(self, rtoti, depth_guess):
""" Determing the maximum rooting depth through solving Eqn. B6. for
rooting depth
Parameters:
-----------
dmax_iteration : float
An iteration of the rooting depth defined by the optimisation scheme
[NOT USED]
rtoti : float
Initial fine root root C mass [from G'DAY]
[kg m-2] -> paper says DM?!
r0 : float
Root C at half-max N uptake. [NOT USED]
d0 : float
Length scale for exponential decline of Umax(z). [NOT USED]
depth_guess : float
initial starting guess at the root depth [m]
Returns:
--------
rooting_depth : float
optimised rooting depth [m]
"""
root_depth = newton(self.rtot_wrapper, self.rtot_derivative,
depth_guess, args=(rtoti, self.r0, self.d0))
# check optimised value is sensible?
min_rtot = round(self.rtot(root_depth, rtoti, self.r0, self.d0), 4)
gday_rtot = round(rtoti, 4)
if float_ne(min_rtot, gday_rtot):
msg = "Error, rtot supplied = %f but min = %f" % (rtoti, smin_rtot)
raise RuntimeError(msg)
return root_depth
def rtot_wrapper(self, *args):
""" Wrapper method that calls rtot, but subtracts rtoti from the result
to give you the rooting depth
Parameters:
-----------
args[1] : float
Initial fine root root C mass [from G'DAY], rtoti
self.rtot : function
call rtot function to estimate a value of rtot given a rooting
depth iteration
Returns:
--------
val : float
A optimised rooting depth iteration
"""
return self.rtot(*args) - args[1]
def rtot(self, *args):
""" Estimate the total root biomass per unit ground area, i.e. the
integral of root mass per unit soil volume, R(z), over depth z from the
soil surface to the maximim rooting depth, dmax.
Parameters:
-----------
dmax : float
Rooting depth [m]
rtoti : float
Initial fine root root C mass [from G'DAY]
[kg m-2] -> paper says DM?!
r0 : float
Root C at half-max N uptake.
d0 : float
Length scale for exponential decline of Umax(z)
Returns:
--------
rtot : float
Total root C mass given a rooting depth
"""
(dmax, rtoti, r0, d0) = args
return r0 * (2.0 * d0 * (exp(dmax / (2.0 * d0)) - 1.0) - dmax)
def rtot_derivative(self, *args):
""" Derivative of maximum root depth equation, rtot
Parameters:
-----------
dmax : float
Rooting depth [m]
rtoti : float
Initial fine root root C mass [from G'DAY]
[kg m-2] -> paper says DM?!
r0 : float
Root C at half-max N uptake.
d0 : float
Length scale for exponential decline of Umax(z)
Returns:
--------
val : float
derivative of rtot
"""
(dmax, rtoti, r0, d0) = args
return r0 * (exp(0.5 * dmax / d0) - 1.0)
def calculate_root_mass_above_depth(self, rtoti, root_depth):
""" Estimate cumulative root mass above depth, 30 cm for the G'DAY model
Parameters
----------
rtoti : float
Initial fine root root C mass [from G'DAY]
[kg m-2] -> paper says DM?!
root_depth : float
model rooting depth (m)
Returns
-------
val : float
cumulative root C mass above soil depth assumed by G'DAY model, 30cm
"""
arg1 = rtoti + 2.0 * self.r0 * self.d0 + root_depth * self.r0
arg2 = 1.0 - exp(-self.top_soil_depth / (2.0 * self.d0))
return arg1 * arg2 - self.r0 * self.top_soil_depth
def calc_plant_nuptake(self, *args):
""" Plant N uptake as a func of maximum rooting depth, eqn B8
Parameters
----------
root_depth : float
max rooting depth [m]
z : float
incremental depth provided by integration func
nsupply : float
soil N supply rate to plants per day [N/m2]
top_soil_depth : float
Depth of soil assumed by G'DAY model [m]
Returns
-------
nuptake : float
plant N uptake
"""
(root_depth, nsupply) = args
arg1 = nsupply / (1.0 - exp(-self.top_soil_depth / self.d0))
arg2 = (1.0 - exp(-root_depth / (2.0 * self.d0)))**2
return arg1 * arg2
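# Typical call pattern for the model above (added sketch; parameter values are
# illustrative assumptions only):
#   rm = RootingDepthModel(d0=0.35, r0=0.1325, top_soil_depth=0.3)
#   root_depth, nuptake, rabove = rm.main(rtoti=0.5, nsupply=2.0,
#                                         depth_guess=1.0)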
def newton(f, fprime, x0, args=(), tol=1E-6, maxiter=250):
""" Newton-Raphson: finds a zero of the func, given an inital guess
Parameters
----------
f : function
The function whose zero is wanted.
x0 : float
An initial guess.
fprime : function
The derivative of the function
args : tuple, optional
Extra arguments to be used in the function call.
tol : float, optional
The allowable error of the zero value.
maxiter : int, optional
Maximum number of iterations.
Returns
-------
val : float
Estimated location where function is zero
"""
for iter in xrange(maxiter):
myargs = (x0,) + args
dx = f(*myargs) / fprime(*myargs)
x = x0 - dx
if abs(x - x0) < tol:
return x
x0 = x
raise RuntimeError, "No minimum found after %d iterations" % maxiter
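# Illustrative self-check of the Newton-Raphson helper above (added sketch,
# not part of the original module): solve f(x) = x**2 - 2 for its positive
# root, sqrt(2).
if __name__ == "__main__":
    _sqrt2 = newton(lambda x: x * x - 2.0, lambda x: 2.0 * x, 1.0)
    assert abs(_sqrt2 - 2.0 ** 0.5) < 1e-6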
if __name__ == "__main__":
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
# Create the PdfPages object to which we will save the pages:
pdf = PdfPages('/Users/mdekauwe/Desktop/root_model_test.pdf')
RM = RootingDepthModel(d0=0.35, r0=.1325, top_soil_depth=0.3)
rtot = np.linspace(0, 1.2, 100)
nsupply = np.linspace(0.1, 10.0, 5)
for nsup in nsupply:
dmax = np.zeros(0)
for rt in rtot:
(root_depth, nuptake, rabove) = RM.main(rt, nsup, depth_guess=1.0)
dmax = np.append(dmax, root_depth)
plt.plot(rtot*2., dmax, label="N supply = %.2f" %(nsup))
plt.ylabel("Maximum rooting depth (D$_{max}$, m)")
plt.xlabel("Total root mass (R$_{tot}$, kg DM m$^{-2}$)")
plt.title("d$_0$ = 0.35, r$_0$ = 0.1325, top_soil_depth = 0.3")
plt.rcParams.update({'legend.fontsize': 8})
plt.legend(numpoints=1, loc="best")
pdf.savefig()
plt.clf()
for nsup in nsupply:
nup = np.zeros(0)
for rt in rtot:
(root_depth, nuptake, rabove) = RM.main(rt, nsup, depth_guess=1.0)
nup = np.append(nup, nuptake)
plt.plot(rtot*2., nup, label="N supply = %.2f" %(nsup))
plt.legend(numpoints=1, loc="best")
plt.ylabel("N Uptake (g N m$^{-2}$)")
plt.xlabel("Total root mass (R$_{tot}$, kg DM m$^{-2}$)")
plt.title("d$_0$ = 0.35, r$_0$ = 0.1325, top_soil_depth = 0.3")
plt.rcParams.update({'legend.fontsize': 8})
pdf.savefig()
plt.clf()
zval = np.linspace(0.05, .99, 5)
for zv in zval:
RM = RootingDepthModel(d0=zv, r0=.1325, top_soil_depth=0.3)
nup = np.zeros(0)
for rt in rtot:
(root_depth, nuptake, rabove) = RM.main(rt, nsup, depth_guess=1.0)
nup = np.append(nup, nuptake)
plt.plot(rtot*2., nup, label="soil depth (Z) = %.2f" %(zv))
plt.legend(numpoints=1, loc="best")
plt.ylabel("N Uptake (g N m$^{-2}$)")
plt.xlabel("Total root mass (R$_{tot}$, kg DM m$^{-2}$)")
plt.title("d$_0$ = see legend, r$_0$ = 0.1325, top_soil_depth = 0.3")
plt.rcParams.update({'legend.fontsize': 8})
pdf.savefig()
plt.clf()
# Remember to close the object
pdf.close()
| [
"[email protected]"
] | |
2b6a99e9ca658e6f1cc574e56292dd95a63adb96 | 3b98ee18977177e10b57e6162a03204e3774d3b8 | /Kirk_Byers_python_for_network_engineers/week2/week2exercise1.py | 7ed21f9bba6e7484fd3a5b58b9e2f55c26fba027 | [] | no_license | mattmiller87/practice | 0a3d1cae1283abb683dfab0af86e6c569a6104e1 | 9655a8020038e0f6dfe8df842867debac0fcb1e3 | refs/heads/master | 2022-06-23T23:47:50.350379 | 2022-06-14T13:30:51 | 2022-06-14T13:38:56 | 51,970,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,602 | py | '''
I. Create a script that does the following
A. Prompts the user to input an IP network.
Notes:
1. For simplicity the network is always assumed to be a /24 network
2. The network can be entered in using one of the following three formats 10.88.17.0, 10.88.17., or 10.88.17
B. Regardless of which of the three formats is used, store this IP network as a list in the following format ['10', '88', '17', '0'] i.e. a list with four octets (all strings), the last octet is always zero (a string).
Hint: There is a way you can accomplish this using a list slice.
Hint2: If you can't solve this question with a list slice, then try using the below if statement (note, we haven't discussed if/else conditionals yet; we will talk about them in the next class).
if len(octets) == 3:
octets.append('0')
elif len(octets) == 4:
octets[3] = '0'
C. Print the IP network out to the screen.
D. Print a table that looks like the following (columns 20 characters in width):
NETWORK_NUMBER FIRST_OCTET_BINARY FIRST_OCTET_HEX
88.19.107.0 0b1011000 0x58
'''
ip_addr = raw_input("Please enter an IP address: ")
print "The IP Address you entered is: " + ip_addr
ip_parts = ip_addr.split(".")
ip_parts[3] = "0"
ip_fix = ".".join(ip_parts)
first_octet_binary = bin(int(ip_parts[0]))
first_octet_hex = hex(int(ip_parts[0]))
print "%-20s %-20s %-20s" % ("NETWORK_NUMBER","FIRST_OCTET_BINARY","FIRST_OCTET_HEX")
print "%-20s %-20s %-20s" % (ip_fix,first_octet_binary,first_octet_hex) | [
"[email protected]"
] | |
87bdf10495c1654f797d96cbacd760e86d67a12b | 6c523408c0122547d84ac7926cb67ee24f070efd | /8-curso-cod3r-python/pacote/sub/arquivo.py | b14ce505b09a1ce3649e9b576adc4b70d3dc9d80 | [] | no_license | everaldobass/curso-python3 | 153f4364713f76984ccb00670defa927bdbdc4eb | 1874803ff957df246fa052f5588f6066f4f948d9 | refs/heads/master | 2023-05-02T13:25:50.169067 | 2021-05-18T20:01:52 | 2021-05-18T20:01:52 | 186,182,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18 | py | print("Funcionou") | [
"[email protected]"
] | |
42a67c3fb9dd598afba939fca288531e7c478b96 | 3fd0bd83099a2405c53c8f1b3f8235d7ebb46fbd | /tests/unit/specs/test_timer.py | 5f77be1c6fa44cc2d266e027f6b838b25f532ba4 | [
"MIT"
] | permissive | diogoaurelio/floto | 59df848e0844314c999ad0833fec3671ea942cb9 | 5d1dedf91ea427db1f0fd9d7005fc3fa36e17cb6 | refs/heads/master | 2021-01-22T16:38:20.500203 | 2016-03-22T07:31:04 | 2016-03-22T07:31:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | import pytest
import floto.specs
class TestTimer(object):
def test_init(self):
t = floto.specs.Timer(id_='my_timer', requires=['t2'], delay_in_seconds=600)
assert t.id_ == 'my_timer'
assert t.requires == ['t2']
assert t.delay_in_seconds == 600
| [
"[email protected]"
] | |
8726a52380b69f66f75746665c55e2a15f298458 | e3da1286e01dec6a2b096e3d2f6620218468a391 | /.scripts/submit.py | 9dab5a0c353face38047e702bdfbca6c71c92620 | [] | no_license | SquidneySquush/cse-20289 | e42d6314d3a05a75b9ba4461002647019a215211 | 2e377f65e5dfde07fce277dbb30e04531e84d59c | refs/heads/master | 2022-11-17T07:31:22.956873 | 2020-05-01T20:42:05 | 2020-05-01T20:42:05 | 264,722,989 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,165 | py | #!/usr/bin/env python3
import glob
import json
import os
import sys
import requests
import yaml
# Globals
ASSIGNMENTS = {}
DREDD_QUIZ_URL = 'https://dredd.h4x0r.space/quiz/cse-20289-sp20/'
DREDD_QUIZ_MAX = 4.0
# Utilities
def add_assignment(assignment, path=None):
if path is None:
path = assignment
if assignment.startswith('reading'):
ASSIGNMENTS[assignment] = path
def print_results(results):
for key, value in sorted(results):
try:
print('{:>8} {:.2f}'.format(key.title(), value))
except ValueError:
print('{:>8} {}'.format(key.title(), value))
# Submit Functions
def submit_quiz(assignment, path):
answers = None
for mod_load, ext in ((json.load, 'json'), (yaml.safe_load, 'yaml')):
try:
answers = mod_load(open(os.path.join(path, 'answers.' + ext)))
except IOError as e:
pass
except Exception as e:
print('Unable to parse answers.{}: {}'.format(ext, e))
return 1
if answers is None:
print('No quiz found (answers.{json,yaml})')
return 1
print('Submitting {} quiz ...'.format(assignment))
response = requests.post(DREDD_QUIZ_URL + assignment, data=json.dumps(answers))
print_results(response.json().items())
return 0 if response.json().get('score', 0) >= DREDD_QUIZ_MAX else 1
# Main Execution
# Add GitLab branch
try:
add_assignment(os.environ['CI_BUILD_REF_NAME'])
except KeyError:
pass
# Add local git branch
try:
add_assignment(os.popen('git symbolic-ref -q --short HEAD 2> /dev/null').read().strip())
except OSError:
pass
# Add current directory
add_assignment(os.path.basename(os.path.abspath(os.curdir)), os.curdir)
# For each assignment, submit quiz answers and program code
if not ASSIGNMENTS:
print('Nothing to submit!')
sys.exit(1)
exit_code = 0
for assignment, path in sorted(ASSIGNMENTS.items()):
print('Submitting {} assignment ...'.format(assignment))
if 'reading' in assignment:
exit_code += submit_quiz(assignment, path)
sys.exit(exit_code)
# vim: set sts=4 sw=4 ts=8 expandtab ft=python:
| [
"[email protected]"
] | |
97971326bba15e94906f8e029d5d8d64d6ef433d | 40b0284e928451f4c26c4aa75180bd3f36251e0d | /tw2/asyncresources/requirejs/__init__.py | df843783696a1cac737c908606160d0dc98e55ed | [
"MIT"
] | permissive | amol-/tw2.asyncresources | 2a8f318ff985b4ce2024bfa54707d389250601a2 | 298f6cc17ccc668f577b19322f2148b290c751b0 | refs/heads/master | 2021-01-18T14:38:42.437050 | 2014-05-12T09:23:43 | 2014-05-12T09:23:43 | 19,569,415 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,129 | py | import json
from tw2.core import params as tw2pm
from tw2.core.resources import JSLink as TW2JSLink
from tw2.core.resources import CSSLink as TW2CSSLink
from tw2.core.resources import JSSource as TW2JSSource
class JSLink(TW2JSLink):
inline_engine_name = 'genshi'
location = 'headbottom'
template = '<script type="text/javascript">require(["$w.link"])</script>'
class CSSLink(TW2CSSLink):
inline_engine_name = 'genshi'
location = 'headbottom'
template = '''<script type="text/javascript">
(function(url) {
var link = document.createElement("link");
link.type = "text/css";
link.rel = "stylesheet";
link.href = url;
document.getElementsByTagName("head")[0].appendChild(link);
})("$w.link");
</script>'''
class JSSource(TW2JSSource):
dependencies = tw2pm.Param('resources required by this script')
inline_engine_name = 'genshi'
template = '<script type="text/javascript">require(${w.js_dependencies}, function() { $w.src })</script>'
def prepare(self):
super(TW2JSSource, self).prepare()
self.js_dependencies = json.dumps(self.dependencies.js_links) | [
"[email protected]"
] | |
0d28b008daa72d90879d795f9f62367f1da6e748 | 46357db3b1c1af699384d9cba1ffbc3c732117ad | /selenium_basics/07_webdriver_api/scrolldown.py | 439b92428042e802fb07494fe1578a9eabdd4038 | [] | permissive | khanhdodang/automation-training-python | 28fbd70ca4bc84e47cf17d1e4702513863e38c44 | b16143961cee869c7555b449e2a05abeae2dc3b5 | refs/heads/master | 2023-07-11T05:21:34.495851 | 2021-08-18T01:29:37 | 2021-08-18T01:29:37 | 285,208,030 | 0 | 8 | MIT | 2020-09-29T07:01:15 | 2020-08-05T07:01:46 | Python | UTF-8 | Python | false | false | 420 | py | # import webdriver
from selenium import webdriver
# import time
import time
# create webdriver object
driver = webdriver.Chrome()
driver.maximize_window()
# get geeksforgeeks.org
driver.get("https://www.geeksforgeeks.org/")
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
time.sleep(3)
driver.execute_script("window.scrollTo(document.body.scrollHeight, 0);")
time.sleep(3)
driver.close()
| [
"[email protected]"
] | |
48cd2b90fd8d7b3acd2f71a560a9d8cfea3f13e2 | 61ce1dff5e61dde649e3908b6ba7c2a270a78e94 | /pastebin/debug_settings.py | 2ec0b6b08eb1b1e6d0357ef4758a6ac3d761d2df | [
"Unlicense"
] | permissive | xeddmc/pastebin-django | b9438104691b4c21af4a01e46e8c9ada7ee66403 | 5e38637e5a417ab907a353af8544f64a0ad2b127 | refs/heads/master | 2021-01-12T20:07:53.454983 | 2015-12-21T09:31:29 | 2015-12-21T09:31:29 | 45,421,895 | 0 | 0 | Unlicense | 2019-08-05T19:47:34 | 2015-11-02T21:03:40 | Python | UTF-8 | Python | false | false | 37 | py | from settings import *
DEBUG = True
| [
"[email protected]"
] | |
9912d2cab22351db39278bd3ea7187cff6c523e4 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_017/ch35_2020_10_05_19_36_22_782479.py | 5c7f486b60f6158998b37f504c5fe84a0b9a0d1b | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92 | py | s=0
while s !=0:
    x= int(input("Me diga um número: "))
if x==0:
s+=1
| [
"[email protected]"
] | |
6379119d77fb8304861e6bb9226ab3b25fb3c157 | 7f44a279773732b183963349d146a8dd9a195b88 | /home/migrations/0022_auto_20200929_1731.py | 7074f056ac8ef3685cb16efd7ff2ff64f2433ad2 | [] | no_license | pseudobabble/cms-boilerplate | f138060e2f25721191289eb261185136ae9cf6bd | 3923a8ebe1541118c5551b0996557f241943831f | refs/heads/master | 2022-12-28T01:30:49.554898 | 2020-10-15T15:23:10 | 2020-10-15T15:23:10 | 283,308,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 652 | py | # Generated by Django 3.1.1 on 2020-09-29 17:31
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('home', '0021_auto_20200929_1725'),
]
operations = [
migrations.RemoveField(
model_name='artwork',
name='artwork_soundbite',
),
migrations.AddField(
model_name='soundbite',
name='artwork_soundbite',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='artwork_soundbites', to='home.artwork'),
),
]
| [
"[email protected]"
] | |
f3c793e84385a281b30ca565ac2bf8ddad2993fb | 0561900bf01598e6c453131952fe4f62b2f9c6e9 | /week2/List Problems/sorted_names.py | 9c0c756f42014ab1a873fe9e1df0d2fc72f6a37c | [] | no_license | Rositsazz/HackBulgaria-Programming0 | 497f40eefa373b024389c58e7d83aff3ffc547ac | 0590d7430ff0aadfb737593a04d3ab1eb894f8d3 | refs/heads/master | 2016-09-06T09:09:19.416648 | 2015-08-11T10:02:49 | 2015-08-11T10:02:49 | 30,868,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | n = input("Enter number:")
n = int(n)
count = 0
array = []
while count < n :
m = input("enter word: " )
array = array + [m]
count+=1
print("Sorted names are:")
b = sorted(array)
i = 0
while i<len(b):
print(b[i])
i+=1
| [
"[email protected]"
] | |
ed51a887cbf8c93ace619126a900925c2cbcdf54 | 78dc15505e17cef3e49410bbadc1bb4812cdbbad | /foiamachine/settings.default.py | 5f1cdffa10b06c608591e67abe7ad5e408b15d45 | [
"MIT"
] | permissive | jgillum/foiamachine | 4a7e4ef9fec681341c014dbe7c98bbce79debb4e | 26d3b02870227696cdaab639c39d47b2a7a42ae5 | refs/heads/master | 2020-06-29T11:19:46.232758 | 2019-08-19T02:27:45 | 2019-08-19T02:27:45 | 200,519,075 | 3 | 1 | null | 2019-08-04T16:57:27 | 2019-08-04T16:57:27 | null | UTF-8 | Python | false | false | 5,386 | py | # Django settings for foiamachine project.
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'foiamachine', # Or path to database file if using sqlite3.
'USER': 'foiamchine', # Not used with sqlite3.
'PASSWORD': 'xxxxxxxx', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.4/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'xxxxxxx'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'foiamachine.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'foiamachine.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| [
"[email protected]"
] | |
ec596f7b294b1180b3f3de8871017bdaef112020 | 9e3398214a4f12bf31e367b0b7a4810c24526a1f | /core/migrations/0001_initial.py | 04fb0789ed8aa582491be59e056c957fe38e7cee | [] | no_license | tricelex/django_ecommerce | 7987381a240f75015d77293c16594f37cbc0c29a | 7a9f9cb427214ea9134b59461e580616d2fc0ce5 | refs/heads/master | 2023-02-25T21:56:55.303780 | 2021-02-01T22:58:23 | 2021-02-01T22:58:23 | 327,143,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,371 | py | # Generated by Django 3.1.5 on 2021-01-06 16:57
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Item",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("title", models.CharField(max_length=100)),
("price", models.FloatField()),
],
),
migrations.CreateModel(
name="OrderItem",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"item",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="core.item"
),
),
],
),
migrations.CreateModel(
name="Order",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("start_date", models.DateTimeField(auto_now_add=True)),
("ordered_date", models.DateTimeField()),
("ordered", models.BooleanField(default=False)),
("items", models.ManyToManyField(to="core.OrderItem")),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
),
]
| [
"[email protected]"
] | |
450ff22a3190894301c71874dd1f607b45be7e88 | 846119600b78936f9004bc3254aa8e6141e7c198 | /setup.py | 185ef6bf3b43da989871790301371722c9282f01 | [] | no_license | slaskis/django-cachalot | e63ef8e5cf54988a0001a29f15c907e004355169 | 2b8f001a212d7774e450a0b2bf82e7d56e8b1e61 | refs/heads/master | 2020-03-25T20:22:46.572296 | 2018-07-27T18:27:56 | 2018-07-27T18:27:56 | 144,128,413 | 0 | 0 | null | 2018-08-09T09:08:40 | 2018-08-09T09:08:39 | null | UTF-8 | Python | false | false | 1,410 | py | #!/usr/bin/env python
import os
from setuptools import setup, find_packages
from cachalot import __version__
CURRENT_PATH = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(CURRENT_PATH, 'requirements.txt')) as f:
required = f.read().splitlines()
setup(
name='django-cachalot',
version=__version__,
author='Bertrand Bordage',
author_email='[email protected]',
url='https://github.com/noripyt/django-cachalot',
description='Caches your Django ORM queries '
'and automatically invalidates them.',
long_description=open('README.rst').read(),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Framework :: Django :: 1.11',
'Framework :: Django :: 2.0',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
],
license='BSD',
packages=find_packages(),
install_requires=required,
include_package_data=True,
zip_safe=False,
)
| [
"[email protected]"
] | |
e13648df911fd331425d79cdea89def40d16be9a | f810836bea801f2fa85418ac7f5f5ffb0f3e0bda | /abc/abc026/A - 掛け算の最大値.py | 0173704542455c55c832fe6424cfe982717d6a44 | [] | no_license | cocoinit23/atcoder | 0afac334233e5f8c75d447f6adf0ddf3942c3b2c | 39f6f6f4cc893e794d99c514f2e5adc9009ee8ca | refs/heads/master | 2022-08-29T06:01:22.443764 | 2022-07-29T07:20:05 | 2022-07-29T07:20:05 | 226,030,199 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | a = int(input())
ans = 0
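# Brute force: try every split of a into two nonnegative parts i and j (with i + j <= a) and keep the largest product i * j.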
for i in range(a + 1):
for j in range(a + 1 - i):
ans = max(ans, i * j)
print(ans)
| [
"[email protected]"
] | |
73d8b6289524e9c1ef1e6c5e5eb989f59323f556 | 293b5687000daed5e5b610c912ca3cd41bb1c942 | /python/EightTeV/GMSB_Lambda180_CTau10000_8TeV_pythia6_cff.py | 71f458d3fd359a5f19e4169824815a29afdf3328 | [] | no_license | amagitte/genproductions | a5ea90b74c025569b8088ca8c53459cc27e3a415 | b081e07a1f7a3843a07ca56ef98706925233b0e1 | refs/heads/master | 2020-05-29T11:07:03.440946 | 2015-05-21T13:17:30 | 2015-05-21T13:17:30 | 36,014,150 | 0 | 0 | null | 2015-05-21T13:24:28 | 2015-05-21T13:24:28 | null | UTF-8 | Python | false | false | 1,816 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.PythiaUEZ2starSettings_cfi import *
generator = cms.EDFilter("Pythia6GeneratorFilter",
pythia6HepMCVerbosity = cms.untracked.bool(False),
maxEventsToPrint = cms.untracked.int32(0),
pythia6PylistVerbosity = cms.untracked.int32(0),
filterEfficiency = cms.untracked.double(1.0),
comEnergy = cms.double(8000.0),
PythiaParameters = cms.PSet(
pythia6UESettingsBlock,
processParameters = cms.vstring(
'MSEL=39 ! All SUSY processes',
'IMSS(1) = 11 ! Spectrum from external SLHA file',
'IMSS(11) = 1 ! keeps gravitino mass from being overwritten',
'IMSS(21) = 33 ! LUN number for SLHA File (must be 33)',
'IMSS(22) = 33 ! Read-in SLHA decay table',
'PARJ(71)=10000. ! for which ctau 10000 mm',
'RMSS(21) = 0 ! The gravitino mass'),
parameterSets = cms.vstring('pythia6UESettings',
'processParameters',
'SLHAParameters'),
SLHAParameters = cms.vstring('SLHAFILE = Configuration/Generator/data/GMSB_Lambda180_CTau10000_pythia6.slha')
#SLHAParameters = cms.vstring('SLHAFILE = GMSB-8-TeV/8-TeV-Samples/python/GMSB_Lambda180_CTau10000_pythia6.slha')
)
)
configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.1 $'),
name = cms.untracked.string('$Source: Configuration/GenProduction/python/EightTeV/GMSB_Lambda180_CTau10000_8TeV_pythia6_cff.py,v $'),
annotation = cms.untracked.string('GMSB Lambda=180TeV and ctau=10000 at 8 TeV')
)
ProductionFilterSequence = cms.Sequence(generator)
| [
"[email protected]"
] | |
64870bd723301749859cd4d0d6225eb595790355 | 3dcf4fe1f47ff5682b9f033dc82e58cc223ce9b4 | /features/steps/placeholder.py | 8af196f8af6d0e73518af2f3f09e70f1c5dfd6ed | [
"MIT"
] | permissive | roeman/python-pptx | 549e3e563acf527b7259795361f220c363458a3d | 1052d94f4397730655c719d61cf858344f1da18d | refs/heads/master | 2021-01-21T16:27:44.922761 | 2014-02-11T09:25:03 | 2014-02-11T09:25:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,606 | py | # encoding: utf-8
"""
Gherkin step implementations for placeholder-related features.
"""
from __future__ import absolute_import
from behave import given, when, then
from hamcrest import assert_that, equal_to, is_
from pptx import Presentation
from .helpers import saved_pptx_path, test_text
# given ===================================================
@given('a bullet body placeholder')
def given_a_bullet_body_placeholder(context):
context.prs = Presentation()
slidelayout = context.prs.slidelayouts[1]
context.sld = context.prs.slides.add_slide(slidelayout)
context.body = context.sld.shapes.placeholders[1]
# when ====================================================
@when('I indent the first paragraph')
def step_when_indent_first_paragraph(context):
context.body.textframe.paragraphs[0].level = 1
@when("I set the title text of the slide")
def step_when_set_slide_title_text(context):
context.sld.shapes.title.text = test_text
# then ====================================================
@then('the paragraph is indented')
def then_paragraph_is_indented(context):
prs = Presentation(saved_pptx_path)
sld = prs.slides[0]
body = sld.shapes.placeholders[1]
p = body.textframe.paragraphs[0]
assert_that(p.level, is_(equal_to(1)))
@then('the text appears in the title placeholder')
def step_then_text_appears_in_title_placeholder(context):
prs = Presentation(saved_pptx_path)
title_shape = prs.slides[0].shapes.title
title_text = title_shape.textframe.paragraphs[0].runs[0].text
assert_that(title_text, is_(equal_to(test_text)))
| [
"[email protected]"
] | |
a80416871d4482e17a38e5f9de8b713ad6f9f2e2 | 6b82f8dee58795adacab6e090cfcc812cdafdbdd | /src/autovehicle_scripts/src/autovehicle_move.py | b80df8dafd5dd5272a0ef3bee58d350a817360c4 | [] | no_license | AlexanderVieira/autovehicle | 00b8bee757c6e4214a9db4f88d342e52c0221272 | 8c3b4f740cdbb451aecc09aea25e57b3a067c105 | refs/heads/main | 2023-04-04T13:23:29.692007 | 2021-04-01T05:29:47 | 2021-04-01T05:29:47 | 340,527,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,969 | py | #!/usr/bin/env python3
import rospy
import tf
import numpy
import geometry_msgs.msg
from geometry_msgs.msg import Twist
from math import *
from nav_msgs.msg import Odometry
from std_msgs.msg import String
import tf2_ros
import copy
LINEAR_VELOCITY_MINIMUM_THRESHOLD = 0.2
ANGULAR_VELOCITY_MINIMUM_THRESHOLD = 0.4
class free_space_navigation():
def poseCallback(self,pose_message):
self.autovehicle_odom_pose.pose.pose.position.x=pose_message.pose.pose.position.x
self.autovehicle_odom_pose.pose.pose.position.y=pose_message.pose.pose.position.y
self.autovehicle_odom_pose.pose.pose.position.z=pose_message.pose.pose.position.z
self.autovehicle_odom_pose.pose.pose.orientation.w=pose_message.pose.pose.orientation.w
self.autovehicle_odom_pose.pose.pose.orientation.x=pose_message.pose.pose.orientation.x
self.autovehicle_odom_pose.pose.pose.orientation.y=pose_message.pose.pose.orientation.y
self.autovehicle_odom_pose.pose.pose.orientation.z=pose_message.pose.pose.orientation.z
# a function that makes the robot move straight
# @param speed: represents the speed of the robot the robot
# @param distance: represents the distance to move by the robot
# @param isForward: if True, the robot moves forward,otherwise, it moves backward
#
# Method 1: using tf and Calculate the distance between the two transformations
def move_v1(self, speed, distance, isForward):
#declare a Twist message to send velocity commands
VelocityMessage = Twist()
# declare tf transform listener: this transform listener will be used to listen and capture the transformation between
# the odom frame (that represent the reference frame) and the base_link frame the represent moving frame
listener = tf.TransformListener()
#set the linear velocity to a positive value if isFoward is True
if (isForward):
VelocityMessage.linear.x =abs(speed)
else: #else set the velocity to negative value to move backward
VelocityMessage.linear.x =-abs(speed)
# all velocities of other axes must be zero.
VelocityMessage.linear.y =0
VelocityMessage.linear.z =0
#The angular velocity of all axes must be zero because we want a straight motion
VelocityMessage.angular.x = 0
VelocityMessage.angular.y = 0
VelocityMessage.angular.z =0
distance_moved = 0.0
loop_rate = rospy.Rate(10) # we publish the velocity at 10 Hz (10 times a second)
# First, we capture the initial transformation before starting the motion.
# we call this transformation "init_transform"
# It is important to "waitForTransform" otherwise, it might not be captured.
try:
#wait for the transform to be found
listener.waitForTransform("/base_link", "/odom", rospy.Time(0),rospy.Duration(10.0))
#Once the transform is found,get the initial_transform transformation.
(trans,rot) = listener.lookupTransform('/base_link', '/odom', rospy.Time(0))
#listener.lookupTransform("/base_link", "/odom", rospy.Time(0),init_transform)
start = 0.5 * sqrt(trans[0] ** 2 + trans[1] ** 2)
except Exception:
rospy.Duration(1.0)
distance_moved = 0
while True :
rospy.loginfo("Autovehicle moves forwards")
#/***************************************
# * STEP1. PUBLISH THE VELOCITY MESSAGE
# ***************************************/
self.velocityPublisher.publish(VelocityMessage)
loop_rate.sleep()
#/**************************************************
# * STEP2. ESTIMATE THE DISTANCE MOVED BY THE ROBOT
# *************************************************/
try:
#wait for the transform to be found
listener.waitForTransform("/base_link", "/odom", rospy.Time(0), rospy.Duration(10.0) )
#Once the transform is found,get the initial_transform transformation.
#listener.lookupTransform("/base_link", "/odom",rospy.Time(0))
(trans,rot) = listener.lookupTransform('/base_link', '/odom', rospy.Time(0))
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
rospy.Duration(1.0)
# Calculate the distance moved by the robot
# There are two methods that give the same result
#
# Method 1: Calculate the distance between the two transformations
# Hint:
# --> transform.getOrigin().x(): represents the x coordinate of the transformation
# --> transform.getOrigin().y(): represents the y coordinate of the transformation
#
# calculate the distance moved
end = 0.5 * sqrt(trans[0] ** 2 + trans[1] ** 2)
distance_moved = distance_moved+abs(abs(float(end)) - abs(float(start)))
if not (distance_moved<distance):
break
#finally, stop the robot when the distance is moved
VelocityMessage.linear.x =0
self.velocityPublisher.publish(VelocityMessage)
def move_v2(self, speed, distance, isForward):
#declare a Twist message to send velocity commands
VelocityMessage = Twist()
# declare tf transform listener: this transform listener will be used to listen and capture the transformation between
# the odom frame (that represent the reference frame) and the base_link frame the represent moving frame
# set the linear velocity to a positive value if isFoward is True
listener = tf.TransformListener()
#declare tf transform
initial_autovehicle_odom_pose = Odometry()
#init_transform: is the transformation before starting the motion
init_transform = geometry_msgs.msg.TransformStamped()
#current_transformation: is the transformation while the robot is moving
current_transform = geometry_msgs.msg.TransformStamped()
if (isForward):
VelocityMessage.linear.x =abs(speed)
else: #else set the velocity to negative value to move backward
VelocityMessage.linear.x =-abs(speed)
#all velocities of other axes must be zero.
VelocityMessage.linear.y =0.0
VelocityMessage.linear.z =0.0
VelocityMessage.angular.x =0.0
VelocityMessage.angular.y =0.0
VelocityMessage.angular.z =0.0
distance_moved = 0.0
loop_rate = rospy.Rate(10) # we publish the velocity at 10 Hz (10 times a second)
# First, we capture the initial transformation before starting the motion.
# we call this transformation "init_transform"
# It is important to "waitForTransform" otherwise, it might not be captured.
try:
#wait for the transform to be found
listener.waitForTransform("/base_link", "/odom", rospy.Time(0),rospy.Duration(10.0))
#Once the transform is found,get the initial_transform transformation.
(trans,rot) = listener.lookupTransform('/base_link', '/odom', rospy.Time(0))
#listener.lookupTransform("/base_link", "/odom", rospy.Time(0),init_transform)
trans1_mat = tf.transformations.translation_matrix(trans)
rot1_mat = tf.transformations.quaternion_matrix(rot)
mat1 = numpy.dot(trans1_mat, rot1_mat)
init_transform.transform.translation = trans
init_transform.transform.rotation =rot
#print(mat1)
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
rospy.Duration(1.0)
while True :
rospy.loginfo("Autovehicle moves forwards")
#/***************************************
# * STEP1. PUBLISH THE VELOCITY MESSAGE
# ***************************************/
self.velocityPublisher.publish(VelocityMessage)
loop_rate.sleep()
#/**************************************************
# * STEP2. ESTIMATE THE DISTANCE MOVED BY THE ROBOT
# *************************************************/
try:
#wait for the transform to be found
listener.waitForTransform("/base_link", "/odom", rospy.Time(0), rospy.Duration(10.0) )
#Once the transform is found,get the initial_transform transformation.
#listener.lookupTransform("/base_link", "/odom",rospy.Time(0))
(trans,rot) = listener.lookupTransform('/base_link', '/odom', rospy.Time(0))
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
rospy.Duration(1.0)
trans1_mat = tf.transformations.translation_matrix(trans)
rot1_mat = tf.transformations.quaternion_matrix(rot)
mat2 = numpy.dot(trans1_mat, rot1_mat)
#print(mat2)
mat3 = numpy.dot(mat1, mat2)
#print(mat3)
trans3 = tf.transformations.translation_from_matrix(mat3)
#print(trans3)
rot3 = tf.transformations.quaternion_from_matrix(mat3)
#print(rot3)
current_transform.transform.translation = trans
current_transform.transform.rotation =rot
distance_moved = distance_moved + (0.5 * sqrt(trans3[0] ** 2 + trans3[1] ** 2))
#print(distance_moved)
#print (numpy.linalg.norm(trans3))
if not (distance_moved<distance):
rospy.loginfo("reached")
break
#finally, stop the robot when the distance is moved
VelocityMessage.linear.x =0
self.velocityPublisher.publish(VelocityMessage)
# a function that makes the robot move straight
# @param speed: represents the speed of the robot the robot
# @param distance: represents the distance to move by the robot
# @param isForward: if True, the robot moves forward,otherwise, it moves backward
#
# Method 3: we use coordinates of the robot to estimate the distance
def move_v3(self, speed, distance, isForward):
#declare a Twist message to send velocity commands
VelocityMessage = Twist()
# declare tf transform listener: this transform listener will be used to listen and capture the transformation between
# the odom frame (that represent the reference frame) and the base_link frame the represent moving frame
listener = tf.TransformListener()
#declare tf transform
initial_autovehicle_odom_pose = Odometry()
#init_transform: is the transformation before starting the motion
init_transform = geometry_msgs.msg.TransformStamped()
#current_transformation: is the transformation while the robot is moving
current_transform = geometry_msgs.msg.TransformStamped()
#set the linear velocity to a positive value if isFoward is True
if (isForward):
VelocityMessage.linear.x =abs(speed)
else: #else set the velocity to negative value to move backward
VelocityMessage.linear.x =-abs(speed)
# all velocities of other axes must be zero.
VelocityMessage.linear.y =0
VelocityMessage.linear.z =0
#The angular velocity of all axes must be zero because we want a straight motion
VelocityMessage.angular.x = 0
VelocityMessage.angular.y = 0
VelocityMessage.angular.z = 0
distance_moved = 0.0
loop_rate = rospy.Rate(10) # we publish the velocity at 10 Hz (10 times a second)
#we update the initial_autovehicle_odom_pose using the autovehicle_odom_pose global variable updated in the callback function poseCallback
#we will use deepcopy() to avoid pointers confusion
initial_autovehicle_odom_pose = copy.deepcopy(self.autovehicle_odom_pose)
while True :
rospy.loginfo("Autovehicle moves forwards")
self.velocityPublisher.publish(VelocityMessage)
loop_rate.sleep()
#rospy.Duration(1.0)
distance_moved = distance_moved+abs(0.5 * sqrt(((self.autovehicle_odom_pose.pose.pose.position.x-initial_autovehicle_odom_pose.pose.pose.position.x) ** 2) +
((self.autovehicle_odom_pose.pose.pose.position.y-initial_autovehicle_odom_pose.pose.pose.position.y) ** 2)))
rospy.loginfo("distance_moved = {}".format(distance_moved))
rospy.loginfo("distance = {}".format(distance))
if not (distance_moved<distance):
rospy.loginfo("reached")
break
#finally, stop the robot when the distance is moved
VelocityMessage.linear.x =0
self.velocityPublisher.publish(VelocityMessage)
def degree2radian(self, degreeAngle):
return (degreeAngle/57.2957795)
def rotate(self,angular_velocity,radians,clockwise):
rotateMessage = Twist()
#declare tf transform
listener = tf.TransformListener()
#init_transform: is the transformation before starting the motion
init_transform = geometry_msgs.msg.TransformStamped()
#current_transformation: is the transformation while the robot is moving
current_transform = geometry_msgs.msg.TransformStamped()
angle_turned = 0.0
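        # Tuple-indexing conditional: cap the requested angular velocity at ANGULAR_VELOCITY_MINIMUM_THRESHOLD when it exceeds the threshold, otherwise use the negated request.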
angular_velocity = (-angular_velocity, ANGULAR_VELOCITY_MINIMUM_THRESHOLD)[angular_velocity > ANGULAR_VELOCITY_MINIMUM_THRESHOLD]
while(radians < 0):
radians += 2* pi
while(radians > 2* pi):
radians -= 2* pi
listener.waitForTransform("/base_link", "/odom", rospy.Time(0), rospy.Duration(10.0) )
(trans,rot) = listener.lookupTransform('/base_link', '/odom', rospy.Time(0))
#listener.lookupTransform("/base_link", "/odom", rospy.Time(0),init_transform)
init_transform.transform.translation = trans
init_transform.transform.rotation =rot
#since the rotation is only in the Z-axes
#start_angle = tf.transformations.#0.5 * sqrt(rot[2] ** 2)
euler = tf.transformations.euler_from_quaternion(rot)
roll = euler[0]
pitch = euler[1]
start_angle = euler[2]
rotateMessage.linear.x = rotateMessage.linear.y = 0.0
rotateMessage.angular.z = angular_velocity
if (clockwise):
rotateMessage.angular.z = -rotateMessage.angular.z
loop_rate = rospy.Rate(20)
while True:
rospy.loginfo("Autovehicle is Rotating")
self.velocityPublisher.publish(rotateMessage)
loop_rate.sleep()
#rospy.Duration(1.0)
try:
#wait for the transform to be found
listener.waitForTransform("/base_link", "/odom", rospy.Time(0), rospy.Duration(10.0) )
#Once the transform is found,get the initial_transform transformation.
#listener.lookupTransform("/base_link", "/odom",rospy.Time(0))
(trans,rot) = listener.lookupTransform('/base_link', '/odom', rospy.Time(0))
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
rospy.Duration(1.0)
current_transform.transform.translation = trans
current_transform.transform.rotation =rot
#since the rotation is only in the Z-axes
#end_angle = 0.5 * sqrt( rot[2] ** 2)
euler = tf.transformations.euler_from_quaternion(rot)
roll = euler[0]
pitch = euler[1]
end_angle = euler[2]
angle_turned = abs(end_angle - start_angle)
rospy.loginfo("angle_turned: %s" %angle_turned)
if (angle_turned > radians):
break
self.velocityPublisher.publish(rotateMessage)
loop_rate.sleep()
def calculateYaw( x1, y1, x2,y2):
bearing = atan2((y2 - y1),(x2 - x1))
#if(bearing < 0) bearing += 2 * PI
bearing *= 180.0 / pi
return bearing
def moveSquare(self,sideLength):
for i in range(0, 4):
self.move_v1(0.3, sideLength, True)
#self.shutdown()
self.rotate(0.2, self.degree2radian(90.0),True)
def __init__(self):
# initiliaze
rospy.init_node('free_space_navigation', anonymous=True)
# What to do you ctrl + c
rospy.on_shutdown(self.shutdown)
self.autovehicle_odom_pose = Odometry()
pose_message = Odometry()
self.velocityPublisher = rospy.Publisher('/cmd_vel', Twist, queue_size=10)
self.pose_subscriber = rospy.Subscriber("/odom", Odometry, self.poseCallback)
# 2 HZ
r = rospy.Rate(10)
r.sleep()
while not rospy.is_shutdown():
#sideLength=1
#self.moveSquare(sideLength)
self.rotate(0.3, self.degree2radian(90.0), True)
#self.rotate(0.3, self.degree2radian(90.0), False)
#self.move_v3(1.0, 7.0, True)
#self.move_v2(1.0, 10.0, True)
def shutdown(self):
# stop autovehicle
rospy.loginfo("Stop Drawing Squares")
self.velocityPublisher.publish(Twist())
rospy.sleep(1)
if __name__ == '__main__':
try:
free_space_navigation()
rospy.spin()
except rospy.ROSInterruptException:
rospy.loginfo("node terminated.")
| [
"[email protected]"
] | |
527a127d9ca3c34c0049745107072334589feedc | 551b75f52d28c0b5c8944d808a361470e2602654 | /huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/restore_to_existing_instance_request.py | eaeaf16d79bd59ce2328a16f998669aec7efff70 | [
"Apache-2.0"
] | permissive | wuchen-huawei/huaweicloud-sdk-python-v3 | 9d6597ce8ab666a9a297b3d936aeb85c55cf5877 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | refs/heads/master | 2023-05-08T21:32:31.920300 | 2021-05-26T08:54:18 | 2021-05-26T08:54:18 | 370,898,764 | 0 | 0 | NOASSERTION | 2021-05-26T03:50:07 | 2021-05-26T03:50:07 | null | UTF-8 | Python | false | false | 3,600 | py | # coding: utf-8
import pprint
import re
import six
class RestoreToExistingInstanceRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'x_language': 'str',
'body': 'RestoreToExistingInstanceRequestBody'
}
attribute_map = {
'x_language': 'X-Language',
'body': 'body'
}
def __init__(self, x_language=None, body=None):
"""RestoreToExistingInstanceRequest - a model defined in huaweicloud sdk"""
self._x_language = None
self._body = None
self.discriminator = None
if x_language is not None:
self.x_language = x_language
if body is not None:
self.body = body
@property
def x_language(self):
"""Gets the x_language of this RestoreToExistingInstanceRequest.
语言
:return: The x_language of this RestoreToExistingInstanceRequest.
:rtype: str
"""
return self._x_language
@x_language.setter
def x_language(self, x_language):
"""Sets the x_language of this RestoreToExistingInstanceRequest.
语言
:param x_language: The x_language of this RestoreToExistingInstanceRequest.
:type: str
"""
self._x_language = x_language
@property
def body(self):
"""Gets the body of this RestoreToExistingInstanceRequest.
:return: The body of this RestoreToExistingInstanceRequest.
:rtype: RestoreToExistingInstanceRequestBody
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this RestoreToExistingInstanceRequest.
:param body: The body of this RestoreToExistingInstanceRequest.
:type: RestoreToExistingInstanceRequestBody
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RestoreToExistingInstanceRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
d3cc070bb3e6485c7ad08ccdbd9116c405ed25d9 | 55538c0680f270a007c64e51cf0ecd417b0929c2 | /Dinosaur_bot.py | 652448bc2b3d671c38a14bff4fa65f6fa041fbe4 | [] | no_license | alankrit03/PROJECTS | 62fff0a10ec5056ce8a907ca9bd7699f3fff1567 | b0242c680f4be0efd7d78d21aea72ce38e7356e6 | refs/heads/master | 2021-05-18T17:01:07.543489 | 2020-04-11T16:34:51 | 2020-04-11T16:34:51 | 251,328,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 959 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 18 10:19:25 2019
@author: Alankrit Agarwal
"""
from PIL import ImageGrab,ImageOps
import pyautogui
import time
from numpy import array
class coOrdinates():
replay_button = (480,460)
dinosuar = (189,439)
def restartGame():
pyautogui.click(coOrdinates.replay_button)
def jump():
pyautogui.keyDown('space')
time.sleep(0.05)
# print('Jump')
pyautogui.keyUp('space')
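# imageGrab samples a small strip just ahead of the dinosaur, converts it to grayscale and sums the colour histogram;
# any value other than the empty-background total (1447 on this setup) means an obstacle is approaching and the bot should jump.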
def imageGrab():
box = (coOrdinates.dinosuar[0]+60,coOrdinates.dinosuar[1],coOrdinates.dinosuar[0]+100,coOrdinates.dinosuar[1]+30)
image = ImageGrab.grab(box)
grayImage = ImageOps.grayscale(image)
a=array(grayImage.getcolors())
print(a.sum())
return(a.sum())
def main():
restartGame()
while(True):
if(imageGrab()!=1447):
print(imageGrab())
jump()
time.sleep(0.1)
main()
| [
"[email protected]"
] | |
266a16f03b660b672e82b8bea1f406c944d034fd | 7c63a96fad4257f4959ffeba0868059fc96566fb | /py/m_lutz-programming_python-4_ed/code/ch_07/24-common_appearance_2/main.pyw | f3d0a30542bc7f07fcacfe426e4a921bf50a1d3f | [
"MIT"
] | permissive | ordinary-developer/education | b426148f5690f48e0ed4853adfc3740bd038b72c | 526e5cf86f90eab68063bb7c75744226f2c54b8d | refs/heads/master | 2023-08-31T14:42:37.237690 | 2023-08-30T18:15:18 | 2023-08-30T18:15:18 | 91,232,306 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | pyw | from tkinter import *
from user_preferences import bcolor, bfont, bsize
class ThemedButton(Button):
def __init__(self, parent = None, **configs):
Button.__init__(self, parent, **configs)
self.pack()
self.config(bg = bcolor, font = (bfont, bsize))
def onSpam():
print('Spam')
def onEggs():
print('Eggs')
class MyButton(ThemedButton):
def __init__(self, parent = None, **configs):
ThemedButton.__init__(self, parent, **configs)
self.config(text = 'subclass')
if __name__ == '__main__':
ThemedButton(text = 'spam', command = onSpam)
ThemedButton(text = 'eggs', command = onEggs)
MyButton(command = onSpam)
mainloop()
| [
"[email protected]"
] | |
466ec891360a35814ebe15a57bbeee8f7f2adbbc | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02726/s686642226.py | b305cc29a6765e5b6e23b79cc90a1b232f86414b | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | import itertools
import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
N,X,Y = list(map(int , input().split()))
combi = list(itertools.combinations(range(1,N+1),2))
ansList = [0 for _ in range(N)]
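# Distance between i and j is the shorter of the direct walk along the line (j - i) and the detour through the extra edge X-Y: walk i..X, cross the shortcut (+1), then Y..j.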
for i,j in combi:
kyori = min(j-i, abs(j-Y) + abs(X-i) + 1)
ansList[kyori] += 1
# print(i,j,ansList)
for ans in ansList[1:]:
print(ans)
| [
"[email protected]"
] | |
f4151d089fa928d1bb232c998e63f1317eb19335 | d3ed9a2d85b2e18cda7c94c67a72a0b6747062d5 | /blog_env/bin/easy_install3 | 9663734d83b0899f76667a24f96e6d26af533c55 | [] | no_license | mrk24251/blogg | dce3cf1073b120bad4b510305379ea47f14fef56 | 533eae6ea922ecbcc518076abf06b9fb4c533ecc | refs/heads/master | 2022-12-11T22:03:40.552450 | 2020-10-30T21:40:34 | 2020-10-30T21:40:34 | 238,296,805 | 1 | 0 | null | 2022-12-08T04:26:41 | 2020-02-04T20:13:41 | JavaScript | UTF-8 | Python | false | false | 258 | #!/home/mrk2/Desktop/blogg/blog_env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
453c5be294b70396ec8c7f1b5601a359121582f9 | 63c8b9227a6b3178d918769042ecb060acc557be | /cwf/gateway/fabfile.py | 331ddde55e0aab0eb396b96748426e32c4ecab5e | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | snwfdhmp/magma | 7c4898db68d2668fd39ed25f73bb9a2bc5959066 | 8b3ff20a2717337a83c8ef531fa773a851d2e54d | refs/heads/master | 2020-12-06T09:06:25.806497 | 2020-01-07T18:27:09 | 2020-01-07T18:28:51 | 232,418,366 | 1 | 0 | NOASSERTION | 2020-01-07T21:12:28 | 2020-01-07T21:12:27 | null | UTF-8 | Python | true | false | 7,077 | py | """
Copyright (c) 2016-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
import sys
from distutils.util import strtobool
from fabric.api import cd, env, execute, lcd, local, put, run, settings, sudo
sys.path.append('../../orc8r')
from tools.fab.hosts import ansible_setup, vagrant_setup
CWAG_ROOT = "$MAGMA_ROOT/cwf/gateway"
CWAG_INTEG_ROOT = "$MAGMA_ROOT/cwf/gateway/integ_tests"
LTE_AGW_ROOT = "../../lte/gateway"
CWAG_TEST_IP = "192.168.128.2"
TRF_SERVER_IP = "192.168.129.42"
TRF_SERVER_SUBNET = "192.168.129.0"
CWAG_BR_NAME = "cwag_br0"
CWAG_TEST_BR_NAME = "cwag_test_br0"
def integ_test(gateway_host=None, test_host=None, trf_host=None,
destroy_vm="False"):
"""
Run the integration tests. This defaults to running on local vagrant
machines, but can also be pointed to an arbitrary host (e.g. amazon) by
passing "address:port" as arguments
gateway_host: The ssh address string of the machine to run the gateway
services on. Formatted as "host:port". If not specified, defaults to
the `cwag` vagrant box.
    test_host: The ssh address string of the machine to run the tests
        on. Formatted as "host:port". If not specified, defaults to the
        `cwag_test` vagrant box.
    trf_host: The ssh address string of the machine to run the traffic server
        on. Formatted as "host:port". If not specified, defaults to the
`magma_trfserver` vagrant box.
"""
destroy_vm = bool(strtobool(destroy_vm))
# Setup the gateway: use the provided gateway if given, else default to the
# vagrant machine
if not gateway_host:
vagrant_setup("cwag", destroy_vm)
else:
ansible_setup(gateway_host, "cwag", "cwag_dev.yml")
execute(_run_unit_tests)
execute(_set_cwag_configs)
cwag_host_to_mac = execute(_get_br_mac, CWAG_BR_NAME)
host = env.hosts[0]
cwag_br_mac = cwag_host_to_mac[host]
# Transfer built images from local machine to CWAG host
if gateway_host:
execute(_transfer_docker_images)
else:
execute(_stop_gateway)
execute(_build_gateway)
execute(_run_gateway)
# Setup the trfserver: use the provided trfserver if given, else default to the
# vagrant machine
with lcd(LTE_AGW_ROOT):
if not trf_host:
vagrant_setup("magma_trfserver", destroy_vm)
else:
ansible_setup(trf_host, "trfserver", "magma_trfserver.yml")
execute(_start_trfserver)
# Run the tests: use the provided test machine if given, else default to
# the vagrant machine
if not test_host:
vagrant_setup("cwag_test", destroy_vm)
else:
ansible_setup(test_host, "cwag_test", "cwag_test.yml")
cwag_test_host_to_mac = execute(_get_br_mac, CWAG_TEST_BR_NAME)
host = env.hosts[0]
cwag_test_br_mac = cwag_test_host_to_mac[host]
execute(_set_cwag_test_configs)
execute(_start_ue_simulator)
# Get back to the gateway vm to setup static arp
if not gateway_host:
vagrant_setup("cwag", destroy_vm)
else:
ansible_setup(gateway_host, "cwag", "cwag_dev.yml")
execute(_set_cwag_networking, cwag_test_br_mac)
# Start tests
if not test_host:
vagrant_setup("cwag_test", destroy_vm)
else:
ansible_setup(test_host, "cwag_test", "cwag_test.yml")
execute(_set_cwag_test_networking, cwag_br_mac)
execute(_run_integ_tests, test_host, trf_host)
def _transfer_docker_images():
output = local("docker images cwf_*", capture=True)
for line in output.splitlines():
if not line.startswith('cwf'):
continue
line = line.rstrip("\n")
image = line.split(" ")[0]
local("docker save -o /tmp/%s.tar %s" % (image, image))
put("/tmp/%s.tar" % image, "%s.tar" % image)
local("rm -f /tmp/%s.tar" % image)
run('docker load -i %s.tar' % image)
def _set_cwag_configs():
""" Set the necessary config overrides """
with cd(CWAG_INTEG_ROOT):
sudo('mkdir -p /var/opt/magma')
sudo('mkdir -p /var/opt/magma/configs')
sudo('cp gateway.mconfig /var/opt/magma/configs/')
def _set_cwag_networking(mac):
sudo('arp -s %s %s' % (CWAG_TEST_IP, mac))
def _get_br_mac(bridge_name):
mac = run("cat /sys/class/net/%s/address" % bridge_name)
return mac
def _set_cwag_test_configs():
""" Set the necessary test configs """
sudo('mkdir -p /etc/magma')
# Create empty uesim config
sudo('touch /etc/magma/uesim.yml')
def _set_cwag_test_networking(mac):
# Don't error if route already exists
with settings(warn_only=True):
sudo('ip route add %s/24 dev %s proto static scope link' %
(TRF_SERVER_SUBNET, CWAG_TEST_BR_NAME))
sudo('arp -s %s %s' % (TRF_SERVER_IP, mac))
def _stop_gateway():
""" Stop the gateway docker images """
with cd(CWAG_ROOT + '/docker'):
sudo(' docker-compose'
' -f docker-compose.yml'
' -f docker-compose.override.yml'
' -f docker-compose.integ-test.yml'
' down')
def _build_gateway():
""" Builds the gateway docker images """
with cd(CWAG_ROOT + '/docker'):
sudo(' docker-compose'
' -f docker-compose.yml'
' -f docker-compose.override.yml'
' -f docker-compose.integ-test.yml'
' build --parallel')
def _run_gateway():
""" Runs the gateway's docker images """
with cd(CWAG_ROOT + '/docker'):
sudo(' docker-compose'
' -f docker-compose.yml'
' -f docker-compose.override.yml'
' -f docker-compose.integ-test.yml'
' up -d ')
def _start_ue_simulator():
""" Starts the UE Sim Service """
with cd(CWAG_ROOT + '/services/uesim/uesim'):
run('tmux new -d \'go run main.go\'')
def _start_trfserver():
""" Starts the traffic gen server"""
run('nohup iperf3 -s -B %s > /dev/null &' % TRF_SERVER_IP, pty=False)
def _run_unit_tests():
""" Run the cwag unit tests """
with cd(CWAG_ROOT):
run('make test')
def _run_integ_tests(test_host, trf_host):
""" Run the integration tests """
with cd(CWAG_INTEG_ROOT):
result = run('make integ_test', warn_only=True)
if not test_host and not trf_host:
# Clean up only for now when running locally
execute(_clean_up)
if result.return_code == 0:
print("Integration Test Passed!")
sys.exit(0)
else:
print("Integration Test returned ", result.return_code)
sys.exit(result.return_code)
def _clean_up():
# already in cwag test vm at this point
# Kill uesim service
run('pkill go', warn_only=True)
with lcd(LTE_AGW_ROOT):
vagrant_setup("magma_trfserver", False)
run('pkill iperf3 > /dev/null &', pty=False, warn_only=True)
| [
"[email protected]"
] | |
1a87536b6ff79a033ee52fef26498ed28d0bd950 | 2820f09d142c00a9d6b24373f6c5ef502c4df75f | /Robot_Arm/gui/owirobot.py | 92d18c07b41d054b7e17175849d7829273ee8d64 | [] | no_license | IdeasGarage/code_club | 53a08bb5c122ea69194fd2360e490b7fa2aa3d03 | f638963b4bc0dd2c0d3473fb2c2a5bb3f7a3d185 | refs/heads/master | 2020-04-12T03:12:47.481116 | 2017-01-24T17:18:04 | 2017-01-24T17:18:04 | 43,813,443 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,455 | py | """
Copyright 2013 Steve Battle
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import usb.core
import usb.util
import time
from Tkinter import *
root = Tk()
dev = usb.core.find(idVendor=0x1267, idProduct=0);
if dev is None:
raise ValueError('Device not found')
STOP = [0,0]
GRIP_CLOSE = [1,0]
GRIP_OPEN = [2,0]
WRIST_UP = [4,0]
WRIST_DOWN = [8,0]
ELBOW_UP = [16,0]
ELBOW_DOWN = [32,0]
SHOULDER_UP = [64,0]
SHOULDER_DOWN= [128,0]
BASE_COUNTERCLOCKWISE = [0,1]
BASE_CLOCKWISE = [0,2]
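# Each motion constant is a two-byte motor bitmask (arm motors byte, base motor byte); command()/command2() append a
# third byte for the LED and send the triple to the arm via a USB vendor control transfer (bmRequestType 0x40, bRequest 6).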
duration = 1
def command2(cmd, light):
c = list(cmd)
c.append(light)
dev.ctrl_transfer(0x40,6,0x100,0,c,1000)
def command(cmd, light, duration):
c = list(cmd)
c.append(light)
dev.ctrl_transfer(0x40,6,0x100,0,c,1000)
time.sleep(duration)
c = list(STOP)
c.append(light)
dev.ctrl_transfer(0x40,6,0x100,0,c,1000)
class Window:
def __init__(self, parent):
frame = Frame(parent)
frame.pack()
Label(frame, text="OWI Robot").grid(row=0, column=1)
Label(frame, text="Gripper:").grid(row=1)
self.btn1 = Button(frame, text="close", command=self.grip_close)
self.btn1.grid(row=1, column=1)
self.btn2 = Button(frame, text="open", command=self.grip_open)
self.btn2.grid(row=1, column=2)
Label(frame, text="Wrist:").grid(row=2)
self.wristScale = Scale(frame, from_=-1, to=1, orient=HORIZONTAL, command=self.wrist)
self.wristScale.grid(row=2, column=1)
Label(frame, text="Elbow:").grid(row=3)
self.elbowScale = Scale(frame, from_=-1, to=1, orient=HORIZONTAL, command=self.elbow)
self.elbowScale.grid(row=3, column=1)
Label(frame, text="Shoulder:").grid(row=4)
self.shoulderScale = Scale(frame, from_=-1, to=1, orient=HORIZONTAL, command=self.shoulder)
self.shoulderScale.grid(row=4, column=1)
Label(frame, text="Base:").grid(row=5)
self.baseScale = Scale(frame, from_=-1, to=1, orient=HORIZONTAL, command=self.base)
self.baseScale.grid(row=5, column=1)
self.lightVar = IntVar()
self.cb = Checkbutton(frame, text="Light", command=self.light, variable=self.lightVar, offvalue=0, onvalue=1)
self.cb.grid(row=6)
def grip_close(self):
command(GRIP_CLOSE,self.lightVar.get(),duration)
def grip_open(self):
command(GRIP_OPEN,self.lightVar.get(),duration)
def light(self):
command2(STOP,self.lightVar.get())
def wrist(self, value):
self.elbowScale.set(0)
self.shoulderScale.set(0)
self.baseScale.set(0)
if int(value)<0:
command2(WRIST_DOWN,self.lightVar.get())
elif int(value)>0:
command2(WRIST_UP, self.lightVar.get())
else:
command2(STOP, self.lightVar.get())
def elbow(self, value):
self.wristScale.set(0)
self.shoulderScale.set(0)
self.baseScale.set(0)
if int(value)<0:
command2(ELBOW_DOWN,self.lightVar.get())
elif int(value)>0:
command2(ELBOW_UP, self.lightVar.get())
else:
command2(STOP, self.lightVar.get())
def shoulder(self, value):
self.wristScale.set(0)
self.elbowScale.set(0)
self.baseScale.set(0)
if int(value)<0:
command2(SHOULDER_DOWN,self.lightVar.get())
elif int(value)>0:
command2(SHOULDER_UP, self.lightVar.get())
else:
command2(STOP, self.lightVar.get())
def base(self, value):
self.wristScale.set(0)
self.elbowScale.set(0)
self.shoulderScale.set(0)
if int(value)>0:
command2(BASE_COUNTERCLOCKWISE,self.lightVar.get())
elif int(value)<0:
command2(BASE_CLOCKWISE, self.lightVar.get())
else:
command2(STOP, self.lightVar.get())
window = Window(root)
root.mainloop()
| [
"[email protected]"
] | |
09ae244ad99bc60aff2c70c0e43895b0cff2b546 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/AllenInstitute_dipde/dipde-master/dipde/internals/connection.py | fff691738825b6ce043b658098641e70501c748e | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 6,377 | py | """Module containing Connection class, connections between source and target populations."""
# Copyright 2013 Allen Institute
# This file is part of dipde
# dipde is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dipde is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with dipde. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from dipde.internals import utilities as util
from dipde.internals import ConnectionDistribution
class Connection(object):
'''Class that connects dipde source population to dipde target population.
The Connection class handles all of the details of propogating connection
information between a source population (dipde ExtermalPopulation or
InternalPopulation) and a target population (dipde InternalPopulation).
Handles delay information via a delay queue that is rolled on each timestep,
and reuses connection information by using a ConnectionDistribution object
with a specific signature to avoid duplication among identical connections.
Parameters
----------
source : InternalPopulation or ExternalPopulation
Source population for connection.
target : InternalPopulation
Target population for connection.
nsyn : int
In-degree of connectivity from source to target.
weights : list
Weights defining synaptic distribution (np.ndarray).
probs : list (same length as weights, and sums to 1)
Probabilities corresponding to weights.
delay: float (default=0)
Transmission delay (units: sec).
metadata: Connection metadata, all other kwargs
'''
def __init__(self,
source,
target,
nsyn,
**kwargs):
self.source = source
self.target = target
self.nsyn = nsyn
self.weights = kwargs.pop('weights', None)
self.probs = kwargs.pop('probs', None)
self.delay = float(kwargs.pop('delay', 0))
self.metadata = kwargs
if self.weights != None or self.probs != None:
assert len(self.weights) == len(self.probs)
else:
self.weights, self.probs = util.descretize(kwargs.get('distribution', None),
kwargs.get('N', None),
scale=kwargs.get('scale', None))
assert np.abs(self.probs).sum() == 1
# Defined at runtime:
self.delay_queue = None
self.delay_ind = None
self.simulation = None
def initialize(self):
'''Initialize the connection at the beginning of a simulation.
Calling this method:
1) Initializes a delay queue used to store values of inputs in a last-in-first-out rolling queue.
2) Creates a connection_distribution object for the connection, if a suitable object is not already registered with the simulation-level connection distribution collection.
This method is called by the Simulation object (initialization method),
but can also be called by a user when defining an alternative time
stepping loop.
'''
self.initialize_delay_queue()
self.initialize_connection_distribution()
def initialize_connection_distribution(self):
"""Create connection distribution, if necessary.
If the signature of this connection is already registered to the
simulation-level connection distribution collection, it is associated
with self. If not, it adds the connection distribution to the
collection, and associates it with self.
"""
conn_dist = ConnectionDistribution(self.target.edges, self.weights, self.probs)
conn_dist.simulation = self.simulation
self.simulation.connection_distribution_collection.add_connection_distribution(conn_dist)
self.connection_distribution = self.simulation.connection_distribution_collection[conn_dist.signature]
def initialize_delay_queue(self):
"""Initialiaze a delay queue for the connection.
The delay attribute of the connection defines the transmission delay of
the signal from the souce to the target. Firing rate values from the
source population are held in a queue, discretized by the timestep, that
is rolled over once per timestep. if source is an ExternalPopulation,
the queue is initialized to the firing rate at t=0; if the source is an
InternalPopulation, the queue is initialized to zero.
"""
# Set up delay queue:
self.delay_ind = int(np.round(self.delay/self.simulation.dt))
if self.source.type == 'internal':
self.delay_queue = np.core.numeric.ones(self.delay_ind+1)*self.source.curr_firing_rate
elif self.source.type == 'external':
self.delay_queue = np.core.numeric.zeros(self.delay_ind+1)
for i in range(len(self.delay_queue)):
self.delay_queue[i] = self.source.firing_rate(self.simulation.t - self.simulation.dt*i)
self.delay_queue = self.delay_queue[::-1]
else:
raise Exception('Unrecognized source type: "%s"' % self.source.type) # pragma: no cover
def update(self):
"""Update Connection, called once per timestep."""
self.delay_queue[0] = self.source.curr_firing_rate
self.delay_queue = np.core.numeric.roll(self.delay_queue, -1)
@property
def curr_delayed_firing_rate(self):
"""Current firing rate of the source (float).
Property that accesses the firing rate at the top of the delay queue,
from the source population.
"""
try:
return self.delay_queue[0]
except:
self.initialize_delay_queue()
return self.delay_queue[0]
| [
"[email protected]"
] | |
e109d3f17f54fea2e3df2e88ee17f17aee45f02b | 7c63130ae44e773a51fcd38c5dc3116f46daecd7 | /error/Predicted_Results/test_sample4_4recom_model1.py | 679e8bc3e9c879edd6252e6c0a279f51652e1b91 | [] | no_license | GitHubdeWill/code_recom | a4e8e393592d210b0481f61a3cc89ea475c95153 | 954c334e4abb25aa96786c9efa8f8ca22bc286aa | refs/heads/master | 2020-04-12T15:14:02.395548 | 2018-12-20T12:07:31 | 2018-12-20T12:07:31 | 162,574,531 | 0 | 1 | null | 2019-12-02T14:28:38 | 2018-12-20T12:07:00 | Python | UTF-8 | Python | false | false | 13,213 | py | start = [0,0]
end = [7,7]
taken = [
[1,0], [1,1], [1,2], [1,3]
]
queue = []
queue.append([start[0],start[1],-1])
visited = []
maze = []
for i in range(8):
maze.append(['.','.','.','.','.','.','.','.'])
visited.append([0,0,0,0,0,0,0,0])
maze[start[0]][start[1]] = 'S'
maze[end[0]][end[1]] = 'E'
for i in taken:
maze[i[0]][i[1]] = 'X'
while(len(queue) > 0):
point = queue.pop(0)
if end[0] == point[0] and end[1] == point[1]:
print(point[2] + 1)
break
current = point[2] + 1
if point not in taken and visited[point[0]][point[1]]==0:
visited[point[0]][point[1]] = current
for i in range(point[0], -1, -1):
if [i,point[1]] in taken:
break
if visited[i][point[1]]==0:
queue.append([i,point[1],current])
for i in range(point[0], 8):
if [i,point[1]] in taken:
break
if visited[i][point[1]]==0:
queue.append([i,point[1],current])
for i in range(point[1], -1, -1):
if [point[0],i] in taken:
break
if visited[point[0]][i]==0:
queue.append([point[0],i, ])
for i in range(point[1], 8):
if [point[0],i] in taken:
break
if visited[point[0]][i]==0:
queue.append([point[0],i,current])
for i in maze:
for j in i:
print(j, end=' ')
    print()
start = [0,0]
end = [7,7]
taken = [
[1,0], [1,1], [1,2], [1,3]
]
queue = []
queue.append([start[0],start[1],-1])
visited = []
maze = []
for i in range(8):
maze.append(['.','.','.','.','.','.','.','.'])
visited.append([0,0,0,0,0,0,0,0])
maze[start[0]][start[1]] = 'S'
maze[end[0]][end[1]] = 'E'
for i in taken:
maze[i[0]][i[1]] = 'X'
while(len(queue) > 0):
point = queue.pop(0)
if end[0] == point[0] and end[1] == point[1]:
print(point[2] + 1)
break
current = point[2] + 1
if point not in taken and visited[point[0]][point[1]]==0:
visited[point[0]][point[1]] = current
for i in range(point[0], -1, -1):
if [i,point[1]] in taken:
break
if visited[i][point[1]]==0:
queue.append([i,point[1],current])
for i in range(point[0], 8):
if [i,point[1]] in taken:
break
if visited[i][point[1]]==0:
queue.append([i,point[1],current])
for i in range(point[1], -1, -1):
if [point[0],i] in taken:
break
if visited[point[0]][i]==0:
queue.append([point[0],i, ])
for i in range(point[1], 8):
if [point[0],i] in taken:
break
if visited[point[0]][i]==0:
queue.append([point[0],i,current])
for i in maze:
for j in i:
print(j, end=' ')
    print()
start = [0,0]
end = [7,7]
taken = [
[1,0], [1,1], [1,2], [1,3]
]
queue = []
queue.append([start[0],start[1],-1])
visited = []
maze = []
for i in range(8):
maze.append(['.','.','.','.','.','.','.','.'])
visited.append([0,0,0,0,0,0,0,0])
maze[start[0]][start[1]] = 'S'
maze[end[0]][end[1]] = 'E'
for i in taken:
maze[i[0]][i[1]] = 'X'
while(len(queue) > 0):
point = queue.pop(0)
if end[0] == point[0] and end[1] == point[1]:
print(point[2] + 1)
break
current = point[2] + 1
if point not in taken and visited[point[0]][point[1]]==0:
visited[point[0]][point[1]] = current
for i in range(point[0], -1, -1):
if [i,point[1]] in taken:
break
if visited[i][point[1]]==0:
queue.append([i,point[1],current])
for i in range(point[0], 8):
if [i,point[1]] in taken:
break
if visited[i][point[1]]==0:
queue.append([i,point[1],current])
for i in range(point[1], -1, -1):
if [point[0],i] in taken:
break
if visited[point[0]][i]==0:
queue.append([point[0],i,selftable_of_contentsforforforforforforforforforforforforforforforforforforforforforforforforforforfor])
for i in range(point[1], 8):
if [point[0],i] in taken:
break
if visited[point[0]][i]==0:
queue.append([point[0],i,current])
for i in maze:
for j in i:
print(j, end=' ')
    print()
start = [0,0]
end = [7,7]
taken = [
[1,0], [1,1], [1,2], [1,3]
]
queue = []
queue.append([start[0],start[1],-1])
visited = []
maze = []
for i in range(8):
maze.append(['.','.','.','.','.','.','.','.'])
visited.append([0,0,0,0,0,0,0,0])
maze[start[0]][start[1]] = 'S'
maze[end[0]][end[1]] = 'E'
for i in taken:
maze[i[0]][i[1]] = 'X'
while(len(queue) > 0):
point = queue.pop(0)
if end[0] == point[0] and end[1] == point[1]:
print(point[2] + 1)
break
current = point[2] + 1
if point not in taken and visited[point[0]][point[1]]==0:
visited[point[0]][point[1]] = current
for i in range(point[0], -1, -1):
if [i,point[1]] in taken:
break
if visited[i][point[1]]==0:
queue.append([i,point[1],current])
for i in range(point[0], 8):
if [i,point[1]] in taken:
break
if visited[i][point[1]]==0:
queue.append([i,point[1],current])
for i in range(point[1], -1, -1):
if [point[0],i] in taken:
break
if visited[point[0]][i]==0:
                queue.append([point[0],i,current])
for i in range(point[1], 8):
if [point[0],i] in taken:
break
if visited[point[0]][i]==0:
queue.append([point[0],i,current])
for i in maze:
for j in i:
print(j, end=' ')
    print()
start = [0,0]
end = [7,7]
taken = [
[1,0], [1,1], [1,2], [1,3]
]
queue = []
queue.append([start[0],start[1],-1])
visited = []
maze = []
for i in range(8):
maze.append(['.','.','.','.','.','.','.','.'])
visited.append([0,0,0,0,0,0,0,0])
maze[start[0]][start[1]] = 'S'
maze[end[0]][end[1]] = 'E'
for i in taken:
maze[i[0]][i[1]] = 'X'
while(len(queue) > 0):
point = queue.pop(0)
if end[0] == point[0] and end[1] == point[1]:
print(point[2] + 1)
break
current = point[2] + 1
if point not in taken and visited[point[0]][point[1]]==0:
visited[point[0]][point[1]] = current
for i in range(point[0], -1, -1):
if [i,point[1]] in taken:
break
if visited[i][point[1]]==0:
queue.append([i,point[1],current])
for i in range(point[0], 8):
if [i,point[1]] in taken:
break
if visited[i][point[1]]==0:
queue.append([i,point[1],current])
for i in range(point[1], -1, -1):
if [point[0],i] in taken:
break
if visited[point[0]][i]==0:
                queue.append([point[0],i,current])
for i in range(point[1], 8):
if [point[0],i] in taken:
break
if visited[point[0]][i]==0:
queue.append([point[0],i,current])
for i in maze:
for j in i:
print(j, end=' ')
    print()
start = [0,0]
end = [7,7]
taken = [
[1,0], [1,1], [1,2], [1,3]
]
queue = []
queue.append([start[0],start[1],-1])
visited = []
maze = []
for i in range(8):
maze.append(['.','.','.','.','.','.','.','.'])
visited.append([0,0,0,0,0,0,0,0])
maze[start[0]][start[1]] = 'S'
maze[end[0]][end[1]] = 'E'
for i in taken:
maze[i[0]][i[1]] = 'X'
while(len(queue) > 0):
point = queue.pop(0)
if end[0] == point[0] and end[1] == point[1]:
print(point[2] + 1)
break
current = point[2] + 1
if point not in taken and visited[point[0]][point[1]]==0:
visited[point[0]][point[1]] = current
for i in range(point[0], -1, -1):
if [i,point[1]] in taken:
break
if visited[i][point[1]]==0:
queue.append([i,point[1],current])
for i in range(point[0], 8):
if [i,point[1]] in taken:
break
if visited[i][point[1]]==0:
queue.append([i,point[1],current])
for i in range(point[1], -1, -1):
if [point[0],i] in taken:
break
if visited[point[0]][i]==0:
                queue.append([point[0],i,current])
for i in range(point[1], 8):
if [point[0],i] in taken:
break
if visited[point[0]][i]==0:
queue.append([point[0],i,current])
for i in maze:
for j in i:
print(j, end=' ')
    print()
start = [0,0]
end = [7,7]
taken = [
[1,0], [1,1], [1,2], [1,3]
]
queue = []
queue.append([start[0],start[1],-1])
visited = []
maze = []
for i in range(8):
maze.append(['.','.','.','.','.','.','.','.'])
visited.append([0,0,0,0,0,0,0,0])
maze[start[0]][start[1]] = 'S'
maze[end[0]][end[1]] = 'E'
for i in taken:
maze[i[0]][i[1]] = 'X'
while(len(queue) > 0):
point = queue.pop(0)
if end[0] == point[0] and end[1] == point[1]:
print(point[2] + 1)
break
current = point[2] + 1
if point not in taken and visited[point[0]][point[1]]==0:
visited[point[0]][point[1]] = current
for i in range(point[0], -1, -1):
if [i,point[1]] in taken:
break
if visited[i][point[1]]==0:
queue.append([i,point[1],current])
for i in range(point[0], 8):
if [i,point[1]] in taken:
break
if visited[i][point[1]]==0:
queue.append([i,point[1],current])
for i in range(point[1], -1, -1):
if [point[0],i] in taken:
break
if visited[point[0]][i]==0:
                queue.append([point[0],i,current])
for i in range(point[1], 8):
if [point[0],i] in taken:
break
if visited[point[0]][i]==0:
queue.append([point[0],i,current])
for i in maze:
for j in i:
print(j, end=' ')
    print()
start = [0,0]
end = [7,7]
taken = [
[1,0], [1,1], [1,2], [1,3]
]
queue = []
queue.append([start[0],start[1],-1])
visited = []
maze = []
for i in range(8):
maze.append(['.','.','.','.','.','.','.','.'])
visited.append([0,0,0,0,0,0,0,0])
maze[start[0]][start[1]] = 'S'
maze[end[0]][end[1]] = 'E'
for i in taken:
maze[i[0]][i[1]] = 'X'
while(len(queue) > 0):
point = queue.pop(0)
if end[0] == point[0] and end[1] == point[1]:
print(point[2] + 1)
break
current = point[2] + 1
if point not in taken and visited[point[0]][point[1]]==0:
visited[point[0]][point[1]] = current
for i in range(point[0], -1, -1):
if [i,point[1]] in taken:
break
if visited[i][point[1]]==0:
queue.append([i,point[1],current])
for i in range(point[0], 8):
if [i,point[1]] in taken:
break
if visited[i][point[1]]==0:
queue.append([i,point[1],current])
for i in range(point[1], -1, -1):
if [point[0],i] in taken:
break
if visited[point[0]][i]==0:
                queue.append([point[0],i,current])
for i in range(point[1], 8):
if [point[0],i] in taken:
break
if visited[point[0]][i]==0:
queue.append([point[0],i,current])
for i in maze:
for j in i:
print(j, end=' ')
    print()
start = [0,0]
end = [7,7]
taken = [
[1,0], [1,1], [1,2], [1,3]
]
queue = []
queue.append([start[0],start[1],-1])
visited = []
maze = []
for i in range(8):
maze.append(['.','.','.','.','.','.','.','.'])
visited.append([0,0,0,0,0,0,0,0])
maze[start[0]][start[1]] = 'S'
maze[end[0]][end[1]] = 'E'
for i in taken:
maze[i[0]][i[1]] = 'X'
while(len(queue) > 0):
point = queue.pop(0)
if end[0] == point[0] and end[1] == point[1]:
print(point[2] + 1)
break
current = point[2] + 1
if point not in taken and visited[point[0]][point[1]]==0:
visited[point[0]][point[1]] = current
for i in range(point[0], -1, -1):
if [i,point[1]] in taken:
break
if visited[i][point[1]]==0:
queue.append([i,point[1],current])
for i in range(point[0], 8):
if [i,point[1]] in taken:
break
if visited[i][point[1]]==0:
queue.append([i,point[1],current])
for i in range(point[1], -1, -1):
if [point[0],i] in taken:
break
if visited[point[0]][i]==0:
                queue.append([point[0],i,current])
for i in range(point[1], 8):
if [point[0],i] in taken:
break
if visited[point[0]][i]==0:
queue.append([point[0],i,current])
for i in maze:
for j in i:
print(j, end=' ')
    print()
start = [0,0]
end = [7,7]
taken = [
[1,0], [1,1], [1,2], [1,3]
]
queue = []
queue.append([start[0],start[1],-1])
visited = []
maze = []
for i in range(8):
maze.append(['.','.','.','.','.','.','.','.'])
visited.append([0,0,0,0,0,0,0,0])
maze[start[0]][start[1]] = 'S'
maze[end[0]][end[1]] = 'E'
for i in taken:
maze[i[0]][i[1]] = 'X'
while(len(queue) > 0):
point = queue.pop(0)
if end[0] == point[0] and end[1] == point[1]:
print(point[2] + 1)
break
current = point[2] + 1
if point not in taken and visited[point[0]][point[1]]==0:
visited[point[0]][point[1]] = current
for i in range(point[0], -1, -1):
if [i,point[1]] in taken:
break
if visited[i][point[1]]==0:
queue.append([i,point[1],current])
for i in range(point[0], 8):
if [i,point[1]] in taken:
break
if visited[i][point[1]]==0:
queue.append([i,point[1],current])
for i in range(point[1], -1, -1):
if [point[0],i] in taken:
break
if visited[point[0]][i]==0:
                queue.append([point[0],i,current])
for i in range(point[1], 8):
if [point[0],i] in taken:
break
if visited[point[0]][i]==0:
queue.append([point[0],i,current])
for i in maze:
for j in i:
print(j, end=' ')
print() | [
"[email protected]"
] | |
1d281f4a7fba806ffa6a3d7a7354d1fbe7f9fd36 | 71d0a467e2c3c534456ec28f4634fb8efd13a145 | /backend/users/migrations/0002_auto_20201018_0819.py | 721547007279241c4f88e7bad632aad2c34a19eb | [] | no_license | crowdbotics-apps/demo-21662 | 4167ddd99751c3f9352e0cd8752177c4442ecd3f | e6a1de517cc1ae79d0fd58506e604f1ec2258307 | refs/heads/master | 2022-12-31T07:43:46.969806 | 2020-10-18T08:19:38 | 2020-10-18T08:19:38 | 305,053,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | # Generated by Django 2.2.16 on 2020-10-18 08:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='user',
name='name',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
| [
"[email protected]"
] | |
7a6d25498f7e955b9ca6d73293ef58f257f2ff44 | 1c72aa6d53c886d8fb8ae41a3e9b9c6c4dd9dc6f | /Semester 1/Week 6/test_module.py | bbc1c68bb40bb4cdfe43201388e9fc2e1ed27b9c | [] | no_license | codebubb/python_course | 74761ce3189d67e3aff964c056aeab27d4e94d4a | 4a6ed4a64e6a726d886add8364c65956d5053fc2 | refs/heads/master | 2021-01-11T03:06:50.519208 | 2016-07-29T10:47:12 | 2016-10-17T10:42:29 | 71,114,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | import text_analysis
str = "The quick brown fox jumped over the lazy dog"
print "The string has a an average word length of", text_analysis.average_length(str.split())
print "The last word is ", text_analysis.last_word(str)
print "The position in the alphabet of the first letter of the last word is", text_analysis.position_in_alphabet(text_analysis.last_word(str)[0])
| [
"[email protected]"
] | |
71ad32d577c4f279a544b8619e23cd51ea7f8543 | 7c0348afd423d4f94d9a205e6aeb2bc9e3b3bc97 | /lib/python3.7/site-packages/joblib/test/test_memory.py | fb83725a58c817aed264024fe8b69a19a19b132d | [] | permissive | manzars/Movie-Recommender-System | 638b6523de0de8ccc1ac5bc1835316fd6e697fd1 | d89a12ff6706bbe323f62531e5341b477e9b64b7 | refs/heads/master | 2020-04-24T16:41:19.733115 | 2019-05-07T09:33:48 | 2019-05-07T09:33:48 | 172,117,645 | 2 | 4 | Apache-2.0 | 2019-02-25T11:24:42 | 2019-02-22T18:45:02 | null | UTF-8 | Python | false | false | 39,966 | py | """
Test the memory module.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2009 Gael Varoquaux
# License: BSD Style, 3 clauses.
import shutil
import os
import os.path
import pickle
import sys
import time
import datetime
import pytest
from joblib.memory import Memory
from joblib.memory import MemorizedFunc, NotMemorizedFunc
from joblib.memory import MemorizedResult, NotMemorizedResult
from joblib.memory import _FUNCTION_HASHES
from joblib.memory import register_store_backend, _STORE_BACKENDS
from joblib.memory import _build_func_identifier, _store_backend_factory
from joblib.memory import JobLibCollisionWarning
from joblib.parallel import Parallel, delayed
from joblib._store_backends import StoreBackendBase, FileSystemStoreBackend
from joblib.test.common import with_numpy, np
from joblib.test.common import with_multiprocessing
from joblib.testing import parametrize, raises, warns
from joblib._compat import PY3_OR_LATER
from joblib.hashing import hash
if sys.version_info[:2] >= (3, 4):
import pathlib
###############################################################################
# Module-level variables for the tests
def f(x, y=1):
""" A module-level function for testing purposes.
"""
return x ** 2 + y
###############################################################################
# Helper function for the tests
def check_identity_lazy(func, accumulator, location):
""" Given a function and an accumulator (a list that grows every
time the function is called), check that the function can be
decorated by memory to be a lazy identity.
"""
# Call each function with several arguments, and check that it is
# evaluated only once per argument.
memory = Memory(location=location, verbose=0)
func = memory.cache(func)
for i in range(3):
for _ in range(2):
assert func(i) == i
assert len(accumulator) == i + 1
def corrupt_single_cache_item(memory):
single_cache_item, = memory.store_backend.get_items()
output_filename = os.path.join(single_cache_item.path, 'output.pkl')
with open(output_filename, 'w') as f:
f.write('garbage')
def monkeypatch_cached_func_warn(func, monkeypatch_fixture):
# Need monkeypatch because pytest does not
# capture stdlib logging output (see
# https://github.com/pytest-dev/pytest/issues/2079)
recorded = []
def append_to_record(item):
recorded.append(item)
monkeypatch_fixture.setattr(func, 'warn', append_to_record)
return recorded
###############################################################################
# Tests
def test_memory_integration(tmpdir):
""" Simple test of memory lazy evaluation.
"""
accumulator = list()
# Rmk: this function has the same name than a module-level function,
# thus it serves as a test to see that both are identified
# as different.
def f(l):
accumulator.append(1)
return l
check_identity_lazy(f, accumulator, tmpdir.strpath)
# Now test clearing
for compress in (False, True):
for mmap_mode in ('r', None):
memory = Memory(location=tmpdir.strpath, verbose=10,
mmap_mode=mmap_mode, compress=compress)
# First clear the cache directory, to check that our code can
# handle that
# NOTE: this line would raise an exception, as the database file is
# still open; we ignore the error since we want to test what
# happens if the directory disappears
shutil.rmtree(tmpdir.strpath, ignore_errors=True)
g = memory.cache(f)
g(1)
g.clear(warn=False)
current_accumulator = len(accumulator)
out = g(1)
assert len(accumulator) == current_accumulator + 1
# Also, check that Memory.eval works similarly
assert memory.eval(f, 1) == out
assert len(accumulator) == current_accumulator + 1
# Now do a smoke test with a function defined in __main__, as the name
# mangling rules are more complex
f.__module__ = '__main__'
memory = Memory(location=tmpdir.strpath, verbose=0)
memory.cache(f)(1)
def test_no_memory():
""" Test memory with location=None: no memoize """
accumulator = list()
def ff(l):
accumulator.append(1)
return l
memory = Memory(location=None, verbose=0)
gg = memory.cache(ff)
for _ in range(4):
current_accumulator = len(accumulator)
gg(1)
assert len(accumulator) == current_accumulator + 1
def test_memory_kwarg(tmpdir):
" Test memory with a function with keyword arguments."
accumulator = list()
def g(l=None, m=1):
accumulator.append(1)
return l
check_identity_lazy(g, accumulator, tmpdir.strpath)
memory = Memory(location=tmpdir.strpath, verbose=0)
g = memory.cache(g)
# Smoke test with an explicit keyword argument:
assert g(l=30, m=2) == 30
def test_memory_lambda(tmpdir):
" Test memory with a function with a lambda."
accumulator = list()
def helper(x):
""" A helper function to define l as a lambda.
"""
accumulator.append(1)
return x
l = lambda x: helper(x)
check_identity_lazy(l, accumulator, tmpdir.strpath)
def test_memory_name_collision(tmpdir):
" Check that name collisions with functions will raise warnings"
memory = Memory(location=tmpdir.strpath, verbose=0)
@memory.cache
def name_collision(x):
""" A first function called name_collision
"""
return x
a = name_collision
@memory.cache
def name_collision(x):
""" A second function called name_collision
"""
return x
b = name_collision
with warns(JobLibCollisionWarning) as warninfo:
a(1)
b(1)
assert len(warninfo) == 1
assert "collision" in str(warninfo[0].message)
def test_memory_warning_lambda_collisions(tmpdir):
# Check that multiple use of lambda will raise collisions
memory = Memory(location=tmpdir.strpath, verbose=0)
a = lambda x: x
a = memory.cache(a)
b = lambda x: x + 1
b = memory.cache(b)
with warns(JobLibCollisionWarning) as warninfo:
assert a(0) == 0
assert b(1) == 2
assert a(1) == 1
# In recent Python versions, we can retrieve the code of lambdas,
# thus nothing is raised
assert len(warninfo) == 4
def test_memory_warning_collision_detection(tmpdir):
# Check that collisions impossible to detect will raise appropriate
# warnings.
memory = Memory(location=tmpdir.strpath, verbose=0)
a1 = eval('lambda x: x')
a1 = memory.cache(a1)
b1 = eval('lambda x: x+1')
b1 = memory.cache(b1)
with warns(JobLibCollisionWarning) as warninfo:
a1(1)
b1(1)
a1(0)
assert len(warninfo) == 2
assert "cannot detect" in str(warninfo[0].message).lower()
def test_memory_partial(tmpdir):
" Test memory with functools.partial."
accumulator = list()
def func(x, y):
""" A helper function to define l as a lambda.
"""
accumulator.append(1)
return y
import functools
function = functools.partial(func, 1)
check_identity_lazy(function, accumulator, tmpdir.strpath)
def test_memory_eval(tmpdir):
" Smoke test memory with a function with a function defined in an eval."
memory = Memory(location=tmpdir.strpath, verbose=0)
m = eval('lambda x: x')
mm = memory.cache(m)
assert mm(1) == 1
def count_and_append(x=[]):
""" A function with a side effect in its arguments.
    Return the length of its argument and append one element.
"""
len_x = len(x)
x.append(None)
return len_x
def test_argument_change(tmpdir):
""" Check that if a function has a side effect in its arguments, it
should use the hash of changing arguments.
"""
memory = Memory(location=tmpdir.strpath, verbose=0)
func = memory.cache(count_and_append)
# call the function for the first time, is should cache it with
# argument x=[]
assert func() == 0
# the second time the argument is x=[None], which is not cached
# yet, so the functions should be called a second time
assert func() == 1
@with_numpy
@parametrize('mmap_mode', [None, 'r'])
def test_memory_numpy(tmpdir, mmap_mode):
" Test memory with a function with numpy arrays."
accumulator = list()
def n(l=None):
accumulator.append(1)
return l
memory = Memory(location=tmpdir.strpath, mmap_mode=mmap_mode,
verbose=0)
cached_n = memory.cache(n)
rnd = np.random.RandomState(0)
for i in range(3):
a = rnd.random_sample((10, 10))
for _ in range(3):
assert np.all(cached_n(a) == a)
assert len(accumulator) == i + 1
@with_numpy
def test_memory_numpy_check_mmap_mode(tmpdir, monkeypatch):
"""Check that mmap_mode is respected even at the first call"""
memory = Memory(location=tmpdir.strpath, mmap_mode='r', verbose=0)
@memory.cache()
def twice(a):
return a * 2
a = np.ones(3)
b = twice(a)
c = twice(a)
assert isinstance(c, np.memmap)
assert c.mode == 'r'
assert isinstance(b, np.memmap)
assert b.mode == 'r'
# Corrupts the file, Deleting b and c mmaps
# is necessary to be able edit the file
del b
del c
corrupt_single_cache_item(memory)
# Make sure that corrupting the file causes recomputation and that
# a warning is issued.
recorded_warnings = monkeypatch_cached_func_warn(twice, monkeypatch)
d = twice(a)
assert len(recorded_warnings) == 1
exception_msg = 'Exception while loading results'
assert exception_msg in recorded_warnings[0]
# Asserts that the recomputation returns a mmap
assert isinstance(d, np.memmap)
assert d.mode == 'r'
def test_memory_exception(tmpdir):
""" Smoketest the exception handling of Memory.
"""
memory = Memory(location=tmpdir.strpath, verbose=0)
class MyException(Exception):
pass
@memory.cache
def h(exc=0):
if exc:
raise MyException
# Call once, to initialise the cache
h()
for _ in range(3):
# Call 3 times, to be sure that the Exception is always raised
with raises(MyException):
h(1)
def test_memory_ignore(tmpdir):
" Test the ignore feature of memory "
memory = Memory(location=tmpdir.strpath, verbose=0)
accumulator = list()
@memory.cache(ignore=['y'])
def z(x, y=1):
accumulator.append(1)
assert z.ignore == ['y']
z(0, y=1)
assert len(accumulator) == 1
z(0, y=1)
assert len(accumulator) == 1
z(0, y=2)
assert len(accumulator) == 1
def test_memory_args_as_kwargs(tmpdir):
"""Non-regression test against 0.12.0 changes.
https://github.com/joblib/joblib/pull/751
"""
memory = Memory(location=tmpdir.strpath, verbose=0)
@memory.cache
def plus_one(a):
return a + 1
# It's possible to call a positional arg as a kwarg.
assert plus_one(1) == 2
assert plus_one(a=1) == 2
# However, a positional argument that joblib hadn't seen
# before would cause a failure if it was passed as a kwarg.
assert plus_one(a=2) == 3
@parametrize('ignore, verbose, mmap_mode', [(['x'], 100, 'r'),
([], 10, None)])
def test_partial_decoration(tmpdir, ignore, verbose, mmap_mode):
"Check cache may be called with kwargs before decorating"
memory = Memory(location=tmpdir.strpath, verbose=0)
@memory.cache(ignore=ignore, verbose=verbose, mmap_mode=mmap_mode)
def z(x):
pass
assert z.ignore == ignore
assert z._verbose == verbose
assert z.mmap_mode == mmap_mode
def test_func_dir(tmpdir):
# Test the creation of the memory cache directory for the function.
memory = Memory(location=tmpdir.strpath, verbose=0)
path = __name__.split('.')
path.append('f')
path = tmpdir.join('joblib', *path).strpath
g = memory.cache(f)
# Test that the function directory is created on demand
func_id = _build_func_identifier(f)
location = os.path.join(g.store_backend.location, func_id)
assert location == path
assert os.path.exists(path)
assert memory.location == os.path.dirname(g.store_backend.location)
with warns(DeprecationWarning) as w:
assert memory.cachedir == g.store_backend.location
assert len(w) == 1
assert "The 'cachedir' attribute has been deprecated" in str(w[-1].message)
# Test that the code is stored.
# For the following test to be robust to previous execution, we clear
# the in-memory store
_FUNCTION_HASHES.clear()
assert not g._check_previous_func_code()
assert os.path.exists(os.path.join(path, 'func_code.py'))
assert g._check_previous_func_code()
# Test the robustness to failure of loading previous results.
func_id, args_id = g._get_output_identifiers(1)
output_dir = os.path.join(g.store_backend.location, func_id, args_id)
a = g(1)
assert os.path.exists(output_dir)
os.remove(os.path.join(output_dir, 'output.pkl'))
assert a == g(1)
def test_persistence(tmpdir):
# Test the memorized functions can be pickled and restored.
memory = Memory(location=tmpdir.strpath, verbose=0)
g = memory.cache(f)
output = g(1)
h = pickle.loads(pickle.dumps(g))
func_id, args_id = h._get_output_identifiers(1)
output_dir = os.path.join(h.store_backend.location, func_id, args_id)
assert os.path.exists(output_dir)
assert output == h.store_backend.load_item([func_id, args_id])
memory2 = pickle.loads(pickle.dumps(memory))
assert memory.store_backend.location == memory2.store_backend.location
# Smoke test that pickling a memory with location=None works
memory = Memory(location=None, verbose=0)
pickle.loads(pickle.dumps(memory))
g = memory.cache(f)
gp = pickle.loads(pickle.dumps(g))
gp(1)
def test_call_and_shelve(tmpdir):
# Test MemorizedFunc outputting a reference to cache.
for func, Result in zip((MemorizedFunc(f, tmpdir.strpath),
NotMemorizedFunc(f),
Memory(location=tmpdir.strpath,
verbose=0).cache(f),
Memory(location=None).cache(f),
),
(MemorizedResult, NotMemorizedResult,
MemorizedResult, NotMemorizedResult)):
assert func(2) == 5
result = func.call_and_shelve(2)
assert isinstance(result, Result)
assert result.get() == 5
result.clear()
with raises(KeyError):
result.get()
result.clear() # Do nothing if there is no cache.
def test_call_and_shelve_argument_hash(tmpdir):
# Verify that a warning is raised when accessing arguments_hash
# attribute from MemorizedResult
func = Memory(location=tmpdir.strpath, verbose=0).cache(f)
result = func.call_and_shelve(2)
assert isinstance(result, MemorizedResult)
with warns(DeprecationWarning) as w:
assert result.argument_hash == result.args_id
assert len(w) == 1
assert "The 'argument_hash' attribute has been deprecated" \
in str(w[-1].message)
def test_call_and_shelve_lazily_load_stored_result(tmpdir):
"""Check call_and_shelve only load stored data if needed."""
test_access_time_file = tmpdir.join('test_access')
test_access_time_file.write('test_access')
test_access_time = os.stat(test_access_time_file.strpath).st_atime
# check file system access time stats resolution is lower than test wait
# timings.
time.sleep(0.5)
assert test_access_time_file.read() == 'test_access'
if test_access_time == os.stat(test_access_time_file.strpath).st_atime:
# Skip this test when access time cannot be retrieved with enough
# precision from the file system (e.g. NTFS on windows).
pytest.skip("filesystem does not support fine-grained access time "
"attribute")
memory = Memory(location=tmpdir.strpath, verbose=0)
func = memory.cache(f)
func_id, argument_hash = func._get_output_identifiers(2)
result_path = os.path.join(memory.store_backend.location,
func_id, argument_hash, 'output.pkl')
assert func(2) == 5
first_access_time = os.stat(result_path).st_atime
time.sleep(1)
# Should not access the stored data
result = func.call_and_shelve(2)
assert isinstance(result, MemorizedResult)
assert os.stat(result_path).st_atime == first_access_time
time.sleep(1)
# Read the stored data => last access time is greater than first_access
assert result.get() == 5
assert os.stat(result_path).st_atime > first_access_time
def test_memorized_pickling(tmpdir):
for func in (MemorizedFunc(f, tmpdir.strpath), NotMemorizedFunc(f)):
filename = tmpdir.join('pickling_test.dat').strpath
result = func.call_and_shelve(2)
with open(filename, 'wb') as fp:
pickle.dump(result, fp)
with open(filename, 'rb') as fp:
result2 = pickle.load(fp)
assert result2.get() == result.get()
os.remove(filename)
def test_memorized_repr(tmpdir):
func = MemorizedFunc(f, tmpdir.strpath)
result = func.call_and_shelve(2)
func2 = MemorizedFunc(f, tmpdir.strpath)
result2 = func2.call_and_shelve(2)
assert result.get() == result2.get()
assert repr(func) == repr(func2)
# Smoke test with NotMemorizedFunc
func = NotMemorizedFunc(f)
repr(func)
repr(func.call_and_shelve(2))
# Smoke test for message output (increase code coverage)
func = MemorizedFunc(f, tmpdir.strpath, verbose=11, timestamp=time.time())
result = func.call_and_shelve(11)
result.get()
func = MemorizedFunc(f, tmpdir.strpath, verbose=11)
result = func.call_and_shelve(11)
result.get()
func = MemorizedFunc(f, tmpdir.strpath, verbose=5, timestamp=time.time())
result = func.call_and_shelve(11)
result.get()
func = MemorizedFunc(f, tmpdir.strpath, verbose=5)
result = func.call_and_shelve(11)
result.get()
def test_memory_file_modification(capsys, tmpdir, monkeypatch):
# Test that modifying a Python file after loading it does not lead to
# Recomputation
dir_name = tmpdir.mkdir('tmp_import').strpath
filename = os.path.join(dir_name, 'tmp_joblib_.py')
content = 'def f(x):\n print(x)\n return x\n'
with open(filename, 'w') as module_file:
module_file.write(content)
# Load the module:
monkeypatch.syspath_prepend(dir_name)
import tmp_joblib_ as tmp
memory = Memory(location=tmpdir.strpath, verbose=0)
f = memory.cache(tmp.f)
# First call f a few times
f(1)
f(2)
f(1)
# Now modify the module where f is stored without modifying f
with open(filename, 'w') as module_file:
module_file.write('\n\n' + content)
# And call f a couple more times
f(1)
f(1)
# Flush the .pyc files
shutil.rmtree(dir_name)
os.mkdir(dir_name)
# Now modify the module where f is stored, modifying f
content = 'def f(x):\n print("x=%s" % x)\n return x\n'
with open(filename, 'w') as module_file:
module_file.write(content)
# And call f more times prior to reloading: the cache should not be
# invalidated at this point as the active function definition has not
# changed in memory yet.
f(1)
f(1)
# Now reload
sys.stdout.write('Reloading\n')
sys.modules.pop('tmp_joblib_')
import tmp_joblib_ as tmp
f = memory.cache(tmp.f)
# And call f more times
f(1)
f(1)
out, err = capsys.readouterr()
assert out == '1\n2\nReloading\nx=1\n'
def _function_to_cache(a, b):
# Just a place holder function to be mutated by tests
pass
def _sum(a, b):
return a + b
def _product(a, b):
return a * b
def test_memory_in_memory_function_code_change(tmpdir):
_function_to_cache.__code__ = _sum.__code__
memory = Memory(location=tmpdir.strpath, verbose=0)
f = memory.cache(_function_to_cache)
assert f(1, 2) == 3
assert f(1, 2) == 3
with warns(JobLibCollisionWarning):
# Check that inline function modification triggers a cache invalidation
_function_to_cache.__code__ = _product.__code__
assert f(1, 2) == 2
assert f(1, 2) == 2
def test_clear_memory_with_none_location():
memory = Memory(location=None)
memory.clear()
if PY3_OR_LATER:
# Avoid flake8 F821 "undefined name" warning. func_with_kwonly_args and
# func_with_signature are redefined in the exec statement a few lines below
def func_with_kwonly_args():
pass
def func_with_signature():
pass
# exec is needed to define a function with a keyword-only argument and a
# function with signature while avoiding a SyntaxError on Python 2
exec("""
def func_with_kwonly_args(a, b, *, kw1='kw1', kw2='kw2'):
return a, b, kw1, kw2
def func_with_signature(a: int, b: float) -> float:
return a + b
""")
def test_memory_func_with_kwonly_args(tmpdir):
memory = Memory(location=tmpdir.strpath, verbose=0)
func_cached = memory.cache(func_with_kwonly_args)
assert func_cached(1, 2, kw1=3) == (1, 2, 3, 'kw2')
# Making sure that providing a keyword-only argument by
# position raises an exception
with raises(ValueError) as excinfo:
func_cached(1, 2, 3, kw2=4)
excinfo.match("Keyword-only parameter 'kw1' was passed as positional "
"parameter")
# Keyword-only parameter passed by position with cached call
# should still raise ValueError
func_cached(1, 2, kw1=3, kw2=4)
with raises(ValueError) as excinfo:
func_cached(1, 2, 3, kw2=4)
excinfo.match("Keyword-only parameter 'kw1' was passed as positional "
"parameter")
# Test 'ignore' parameter
func_cached = memory.cache(func_with_kwonly_args, ignore=['kw2'])
assert func_cached(1, 2, kw1=3, kw2=4) == (1, 2, 3, 4)
assert func_cached(1, 2, kw1=3, kw2='ignored') == (1, 2, 3, 4)
def test_memory_func_with_signature(tmpdir):
memory = Memory(location=tmpdir.strpath, verbose=0)
func_cached = memory.cache(func_with_signature)
assert func_cached(1, 2.) == 3.
def _setup_toy_cache(tmpdir, num_inputs=10):
memory = Memory(location=tmpdir.strpath, verbose=0)
@memory.cache()
def get_1000_bytes(arg):
return 'a' * 1000
inputs = list(range(num_inputs))
for arg in inputs:
get_1000_bytes(arg)
func_id = _build_func_identifier(get_1000_bytes)
hash_dirnames = [get_1000_bytes._get_output_identifiers(arg)[1]
for arg in inputs]
full_hashdirs = [os.path.join(get_1000_bytes.store_backend.location,
func_id, dirname)
for dirname in hash_dirnames]
return memory, full_hashdirs, get_1000_bytes
def test__get_items(tmpdir):
memory, expected_hash_dirs, _ = _setup_toy_cache(tmpdir)
items = memory.store_backend.get_items()
hash_dirs = [ci.path for ci in items]
assert set(hash_dirs) == set(expected_hash_dirs)
def get_files_size(directory):
full_paths = [os.path.join(directory, fn)
for fn in os.listdir(directory)]
return sum(os.path.getsize(fp) for fp in full_paths)
expected_hash_cache_sizes = [get_files_size(hash_dir)
for hash_dir in hash_dirs]
hash_cache_sizes = [ci.size for ci in items]
assert hash_cache_sizes == expected_hash_cache_sizes
output_filenames = [os.path.join(hash_dir, 'output.pkl')
for hash_dir in hash_dirs]
expected_last_accesses = [
datetime.datetime.fromtimestamp(os.path.getatime(fn))
for fn in output_filenames]
last_accesses = [ci.last_access for ci in items]
assert last_accesses == expected_last_accesses
def test__get_items_to_delete(tmpdir):
memory, expected_hash_cachedirs, _ = _setup_toy_cache(tmpdir)
items = memory.store_backend.get_items()
# bytes_limit set to keep only one cache item (each hash cache
# folder is about 1000 bytes + metadata)
items_to_delete = memory.store_backend._get_items_to_delete('2K')
nb_hashes = len(expected_hash_cachedirs)
assert set.issubset(set(items_to_delete), set(items))
assert len(items_to_delete) == nb_hashes - 1
# Sanity check bytes_limit=2048 is the same as bytes_limit='2K'
items_to_delete_2048b = memory.store_backend._get_items_to_delete(2048)
assert sorted(items_to_delete) == sorted(items_to_delete_2048b)
# bytes_limit greater than the size of the cache
items_to_delete_empty = memory.store_backend._get_items_to_delete('1M')
assert items_to_delete_empty == []
# All the cache items need to be deleted
bytes_limit_too_small = 500
items_to_delete_500b = memory.store_backend._get_items_to_delete(
bytes_limit_too_small)
assert set(items_to_delete_500b), set(items)
# Test LRU property: surviving cache items should all have a more
# recent last_access that the ones that have been deleted
items_to_delete_6000b = memory.store_backend._get_items_to_delete(6000)
surviving_items = set(items).difference(items_to_delete_6000b)
assert (max(ci.last_access for ci in items_to_delete_6000b) <=
min(ci.last_access for ci in surviving_items))
def test_memory_reduce_size(tmpdir):
memory, _, _ = _setup_toy_cache(tmpdir)
ref_cache_items = memory.store_backend.get_items()
# By default memory.bytes_limit is None and reduce_size is a noop
memory.reduce_size()
cache_items = memory.store_backend.get_items()
assert sorted(ref_cache_items) == sorted(cache_items)
# No cache items deleted if bytes_limit greater than the size of
# the cache
memory.bytes_limit = '1M'
memory.reduce_size()
cache_items = memory.store_backend.get_items()
assert sorted(ref_cache_items) == sorted(cache_items)
# bytes_limit is set so that only two cache items are kept
memory.bytes_limit = '3K'
memory.reduce_size()
cache_items = memory.store_backend.get_items()
assert set.issubset(set(cache_items), set(ref_cache_items))
assert len(cache_items) == 2
# bytes_limit set so that no cache item is kept
bytes_limit_too_small = 500
memory.bytes_limit = bytes_limit_too_small
memory.reduce_size()
cache_items = memory.store_backend.get_items()
assert cache_items == []
def test_memory_clear(tmpdir):
memory, _, _ = _setup_toy_cache(tmpdir)
memory.clear()
assert os.listdir(memory.store_backend.location) == []
def fast_func_with_complex_output():
complex_obj = ['a' * 1000] * 1000
return complex_obj
def fast_func_with_conditional_complex_output(complex_output=True):
complex_obj = {str(i): i for i in range(int(1e5))}
return complex_obj if complex_output else 'simple output'
@with_multiprocessing
def test_cached_function_race_condition_when_persisting_output(tmpdir, capfd):
# Test race condition where multiple processes are writing into
# the same output.pkl. See
# https://github.com/joblib/joblib/issues/490 for more details.
memory = Memory(location=tmpdir.strpath)
func_cached = memory.cache(fast_func_with_complex_output)
Parallel(n_jobs=2)(delayed(func_cached)() for i in range(3))
stdout, stderr = capfd.readouterr()
# Checking both stdout and stderr (ongoing PR #434 may change
# logging destination) to make sure there is no exception while
# loading the results
exception_msg = 'Exception while loading results'
assert exception_msg not in stdout
assert exception_msg not in stderr
@with_multiprocessing
def test_cached_function_race_condition_when_persisting_output_2(tmpdir,
capfd):
# Test race condition in first attempt at solving
# https://github.com/joblib/joblib/issues/490. The race condition
# was due to the delay between seeing the cache directory created
# (interpreted as the result being cached) and the output.pkl being
# pickled.
memory = Memory(location=tmpdir.strpath)
func_cached = memory.cache(fast_func_with_conditional_complex_output)
Parallel(n_jobs=2)(delayed(func_cached)(True if i % 2 == 0 else False)
for i in range(3))
stdout, stderr = capfd.readouterr()
# Checking both stdout and stderr (ongoing PR #434 may change
# logging destination) to make sure there is no exception while
# loading the results
exception_msg = 'Exception while loading results'
assert exception_msg not in stdout
assert exception_msg not in stderr
def test_memory_recomputes_after_an_error_while_loading_results(
tmpdir, monkeypatch):
memory = Memory(location=tmpdir.strpath)
def func(arg):
# This makes sure that the timestamp returned by two calls of
# func are different. This is needed on Windows where
# time.time resolution may not be accurate enough
time.sleep(0.01)
return arg, time.time()
cached_func = memory.cache(func)
input_arg = 'arg'
arg, timestamp = cached_func(input_arg)
# Make sure the function is correctly cached
assert arg == input_arg
# Corrupting output.pkl to make sure that an error happens when
# loading the cached result
corrupt_single_cache_item(memory)
# Make sure that corrupting the file causes recomputation and that
# a warning is issued.
recorded_warnings = monkeypatch_cached_func_warn(cached_func, monkeypatch)
recomputed_arg, recomputed_timestamp = cached_func(arg)
assert len(recorded_warnings) == 1
exception_msg = 'Exception while loading results'
assert exception_msg in recorded_warnings[0]
assert recomputed_arg == arg
assert recomputed_timestamp > timestamp
# Corrupting output.pkl to make sure that an error happens when
# loading the cached result
corrupt_single_cache_item(memory)
reference = cached_func.call_and_shelve(arg)
try:
reference.get()
raise AssertionError(
"It normally not possible to load a corrupted"
" MemorizedResult"
)
except KeyError as e:
message = "is corrupted"
assert message in str(e.args)
def test_deprecated_cachedir_behaviour(tmpdir):
# verify the right deprecation warnings are raised when using cachedir
# option instead of new location parameter.
with warns(None) as w:
memory = Memory(cachedir=tmpdir.strpath, verbose=0)
assert memory.store_backend.location.startswith(tmpdir.strpath)
assert len(w) == 1
assert "The 'cachedir' parameter has been deprecated" in str(w[-1].message)
with warns(None) as w:
memory = Memory()
assert memory.cachedir is None
assert len(w) == 1
assert "The 'cachedir' attribute has been deprecated" in str(w[-1].message)
error_regex = """You set both "location='.+ and "cachedir='.+"""
with raises(ValueError, match=error_regex):
memory = Memory(location=tmpdir.strpath, cachedir=tmpdir.strpath,
verbose=0)
class IncompleteStoreBackend(StoreBackendBase):
"""This backend cannot be instanciated and should raise a TypeError."""
pass
class DummyStoreBackend(StoreBackendBase):
"""A dummy store backend that does nothing."""
def _open_item(self, *args, **kwargs):
"""Open an item on store."""
"Does nothing"
def _item_exists(self, location):
"""Check if an item location exists."""
"Does nothing"
def _move_item(self, src, dst):
"""Move an item from src to dst in store."""
"Does nothing"
def create_location(self, location):
"""Create location on store."""
"Does nothing"
def exists(self, obj):
"""Check if an object exists in the store"""
return False
def clear_location(self, obj):
"""Clear object on store"""
"Does nothing"
def get_items(self):
"""Returns the whole list of items available in cache."""
return []
def configure(self, location, *args, **kwargs):
"""Configure the store"""
"Does nothing"
@parametrize("invalid_prefix", [None, dict(), list()])
def test_register_invalid_store_backends_key(invalid_prefix):
# verify the right exceptions are raised when passing a wrong backend key.
with raises(ValueError) as excinfo:
register_store_backend(invalid_prefix, None)
excinfo.match(r'Store backend name should be a string*')
def test_register_invalid_store_backends_object():
# verify the right exceptions are raised when passing a wrong backend
# object.
with raises(ValueError) as excinfo:
register_store_backend("fs", None)
excinfo.match(r'Store backend should inherit StoreBackendBase*')
def test_memory_default_store_backend():
# test an unknow backend falls back into a FileSystemStoreBackend
with raises(TypeError) as excinfo:
Memory(location='/tmp/joblib', backend='unknown')
excinfo.match(r"Unknown location*")
def test_warning_on_unknown_location_type():
class NonSupportedLocationClass:
pass
unsupported_location = NonSupportedLocationClass()
with warns(UserWarning) as warninfo:
_store_backend_factory("local", location=unsupported_location)
expected_mesage = ("Instanciating a backend using a "
"NonSupportedLocationClass as a location is not "
"supported by joblib")
assert expected_mesage in str(warninfo[0].message)
def test_instanciate_incomplete_store_backend():
# Verify that registering an external incomplete store backend raises an
# exception when one tries to instanciate it.
backend_name = "isb"
register_store_backend(backend_name, IncompleteStoreBackend)
assert (backend_name, IncompleteStoreBackend) in _STORE_BACKENDS.items()
with raises(TypeError) as excinfo:
_store_backend_factory(backend_name, "fake_location")
excinfo.match(r"Can't instantiate abstract class "
"IncompleteStoreBackend with abstract methods*")
def test_dummy_store_backend():
# Verify that registering an external store backend works.
backend_name = "dsb"
register_store_backend(backend_name, DummyStoreBackend)
assert (backend_name, DummyStoreBackend) in _STORE_BACKENDS.items()
backend_obj = _store_backend_factory(backend_name, "dummy_location")
assert isinstance(backend_obj, DummyStoreBackend)
@pytest.mark.skipif(sys.version_info[:2] < (3, 4),
reason="pathlib is available for python versions >= 3.4")
def test_instanciate_store_backend_with_pathlib_path():
# Instanciate a FileSystemStoreBackend using a pathlib.Path object
path = pathlib.Path("some_folder")
backend_obj = _store_backend_factory("local", path)
assert backend_obj.location == "some_folder"
def test_filesystem_store_backend_repr(tmpdir):
# Verify string representation of a filesystem store backend.
repr_pattern = 'FileSystemStoreBackend(location="{location}")'
backend = FileSystemStoreBackend()
assert backend.location is None
repr(backend) # Should not raise an exception
assert str(backend) == repr_pattern.format(location=None)
# backend location is passed explicitely via the configure method (called
# by the internal _store_backend_factory function)
backend.configure(tmpdir.strpath)
assert str(backend) == repr_pattern.format(location=tmpdir.strpath)
repr(backend) # Should not raise an exception
def test_memory_objects_repr(tmpdir):
# Verify printable reprs of MemorizedResult, MemorizedFunc and Memory.
def my_func(a, b):
return a + b
memory = Memory(location=tmpdir.strpath, verbose=0)
memorized_func = memory.cache(my_func)
memorized_func_repr = 'MemorizedFunc(func={func}, location={location})'
assert str(memorized_func) == memorized_func_repr.format(
func=my_func,
location=memory.store_backend.location)
memorized_result = memorized_func.call_and_shelve(42, 42)
memorized_result_repr = ('MemorizedResult(location="{location}", '
'func="{func}", args_id="{args_id}")')
assert str(memorized_result) == memorized_result_repr.format(
location=memory.store_backend.location,
func=memorized_result.func_id,
args_id=memorized_result.args_id)
assert str(memory) == 'Memory(location={location})'.format(
location=memory.store_backend.location)
def test_memorized_result_pickle(tmpdir):
# Verify a MemoryResult object can be pickled/depickled. Non regression
# test introduced following issue
# https://github.com/joblib/joblib/issues/747
memory = Memory(location=tmpdir.strpath)
@memory.cache
def g(x):
return x**2
memorized_result = g.call_and_shelve(4)
memorized_result_pickle = pickle.dumps(memorized_result)
memorized_result_loads = pickle.loads(memorized_result_pickle)
assert memorized_result.store_backend.location == \
memorized_result_loads.store_backend.location
assert memorized_result.func == memorized_result_loads.func
assert memorized_result.args_id == memorized_result_loads.args_id
assert str(memorized_result) == str(memorized_result_loads)
def compare(left, right, ignored_attrs=None):
if ignored_attrs is None:
ignored_attrs = []
left_vars = vars(left)
right_vars = vars(right)
assert set(left_vars.keys()) == set(right_vars.keys())
for attr in left_vars.keys():
if attr in ignored_attrs:
continue
assert left_vars[attr] == right_vars[attr]
@pytest.mark.parametrize('memory_kwargs',
[{'compress': 3, 'verbose': 2},
{'mmap_mode': 'r', 'verbose': 5, 'bytes_limit': 1e6,
'backend_options': {'parameter': 'unused'}}])
def test_memory_pickle_dump_load(tmpdir, memory_kwargs):
memory = Memory(location=tmpdir.strpath, **memory_kwargs)
memory_reloaded = pickle.loads(pickle.dumps(memory))
# Compare Memory instance before and after pickle roundtrip
compare(memory.store_backend, memory_reloaded.store_backend)
compare(memory, memory_reloaded,
ignored_attrs=set(['store_backend', 'timestamp']))
assert hash(memory) == hash(memory_reloaded)
func_cached = memory.cache(f)
func_cached_reloaded = pickle.loads(pickle.dumps(func_cached))
# Compare MemorizedFunc instance before/after pickle roundtrip
compare(func_cached.store_backend, func_cached_reloaded.store_backend)
compare(func_cached, func_cached_reloaded,
ignored_attrs=set(['store_backend', 'timestamp']))
assert hash(func_cached) == hash(func_cached_reloaded)
# Compare MemorizedResult instance before/after pickle roundtrip
memorized_result = func_cached.call_and_shelve(1)
memorized_result_reloaded = pickle.loads(pickle.dumps(memorized_result))
compare(memorized_result.store_backend,
memorized_result_reloaded.store_backend)
compare(memorized_result, memorized_result_reloaded,
ignored_attrs=set(['store_backend', 'timestamp']))
assert hash(memorized_result) == hash(memorized_result_reloaded)
| [
"[email protected]"
] | |
c92417f537f0d13073d8166e8e78b658f05ed99b | a210132dd3e87772b4e1f3ef246ea8da4d8646e7 | /cmdb1_demo1/cmdb1_demo1_app1/migrations/0030_auto_20190112_1339.py | 5a0b6b06c4f85f641cf0c6107f747de923245c3d | [] | no_license | hongmalong/cmdb1 | 8574b21e81fb1833ec75d96fa771073ab9c360b3 | 8f2ba9afa549e24d9b406ff5b00a76dec31dd5ac | refs/heads/master | 2021-07-01T14:29:31.278517 | 2019-04-18T05:04:54 | 2019-04-18T05:04:54 | 124,845,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,148 | py | # Generated by Django 2.0.3 on 2019-01-12 13:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cmdb1_demo1_app1', '0029_auto_20190112_1337'),
]
operations = [
migrations.CreateModel(
name='EventTable',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID')),
('eventId', models.CharField(max_length=200, unique=True)),
('status', models.CharField(default=None, max_length=200, null=True)),
],
),
migrations.AlterField(
model_name='cabinettable',
name='ctime',
field=models.CharField(default='20190112133920', max_length=49, null=True),
),
migrations.AlterField(
model_name='companytable',
name='ctime',
field=models.CharField(default='20190112133920', max_length=49, null=True),
),
migrations.AlterField(
model_name='deploylogtable',
name='ctime',
field=models.CharField(default='20190112133920', max_length=49, null=True),
),
migrations.AlterField(
model_name='enviromenttable',
name='ctime',
field=models.CharField(default='20190112133920', max_length=49, null=True),
),
migrations.AlterField(
model_name='equipmenttable',
name='ctime',
field=models.CharField(default='20190112133920', max_length=49, null=True),
),
migrations.AlterField(
model_name='equipmenttypetable',
name='ctime',
field=models.CharField(default='20190112133920', max_length=49, null=True),
),
migrations.AlterField(
model_name='historytable',
name='ctime',
field=models.CharField(default='20190112133920', max_length=49, null=True),
),
migrations.AlterField(
model_name='logpathtable',
name='ctime',
field=models.CharField(default='20190112133920', max_length=49, null=True),
),
migrations.AlterField(
model_name='nodetable',
name='ctime',
field=models.CharField(default='20190112133920', max_length=49, null=True),
),
migrations.AlterField(
model_name='occupationtable',
name='ctime',
field=models.CharField(default='20190112133920', max_length=49, null=True),
),
migrations.AlterField(
model_name='porttable',
name='ctime',
field=models.CharField(default='20190112133920', max_length=49, null=True),
),
migrations.AlterField(
model_name='privatetable',
name='ctime',
field=models.CharField(default='20190112133920', max_length=49, null=True),
),
migrations.AlterField(
model_name='projecttable',
name='ctime',
field=models.CharField(default='20190112133920', max_length=49, null=True),
),
migrations.AlterField(
model_name='providertable',
name='ctime',
field=models.CharField(default='20190112133920', max_length=49, null=True),
),
migrations.AlterField(
model_name='serverroomtable',
name='ctime',
field=models.CharField(default='20190112133920', max_length=49, null=True),
),
migrations.AlterField(
model_name='servicetable',
name='ctime',
field=models.CharField(default='20190112133920', max_length=49, null=True),
),
migrations.AlterField(
model_name='servicetypetable',
name='ctime',
field=models.CharField(default='20190112133920', max_length=49, null=True),
),
migrations.AlterField(
model_name='servicetypetable',
name='portNumber',
field=models.CharField(default='20190112133920', max_length=49, null=True),
),
]
| [
"[email protected]"
] | |
3ed8f4ab078ac3c57086c22a5dd0db94514f07af | cea30aead7f0b529ee072c1bcab2896777e1408d | /PreprocessingCropsData/venv/Lib/site-packages/sklearn/datasets/_covtype.py | f705af97d2dfeb6b3a67a9d38481d1b500f0f750 | [] | no_license | pgj9702/FarmSolution | 3730ab3ca983b335ed48a60935c5fa6e3983cbb1 | a8cacc45b8519e79b51ab65b9539a01f5006e64f | refs/heads/master | 2023-03-30T15:41:10.312044 | 2021-03-31T08:47:23 | 2021-03-31T08:47:23 | 334,019,778 | 0 | 1 | null | 2021-02-22T09:32:57 | 2021-01-29T02:52:46 | Python | UTF-8 | Python | false | false | 6,646 | py | """Forest covertype dataset.
A classic dataset for classification benchmarks, featuring categorical and
real-valued features.
The dataset page is available from UCI Machine Learning Repository
https://archive.ics.uci.edu/ml/datasets/Covertype
Courtesy of Jock A. Blackard and Colorado State University.
"""
# Author: Lars Buitinck
# Peter Prettenhofer <[email protected]>
# License: BSD 3 clause
from gzip import GzipFile
import logging
from os.path import dirname, exists, join
from os import remove, makedirs
import numpy as np
import joblib
from . import get_data_home
from ._base import _convert_data_dataframe
from ._base import _fetch_remote
from ._base import RemoteFileMetadata
from ..utils import Bunch
from ._base import _pkl_filepath
from ..utils import check_random_state
from ..utils.validation import _deprecate_positional_args
# The original data can be found in:
# https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.data.gz
ARCHIVE = RemoteFileMetadata(
filename='covtype.area_data.gz',
url='https://ndownloader.figshare.com/files/5976039',
checksum=('614360d0257557dd1792834a85a1cdeb'
'fadc3c4f30b011d56afee7ffb5b15771'))
logger = logging.getLogger(__name__)
# Column names reference:
# https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.info
FEATURE_NAMES = ["Elevation",
"Aspect",
"Slope",
"Horizontal_Distance_To_Hydrology",
"Vertical_Distance_To_Hydrology",
"Horizontal_Distance_To_Roadways",
"Hillshade_9am",
"Hillshade_Noon",
"Hillshade_3pm",
"Horizontal_Distance_To_Fire_Points"]
FEATURE_NAMES += [f"Wilderness_Area_{i}" for i in range(4)]
FEATURE_NAMES += [f"Soil_Type_{i}" for i in range(40)]
TARGET_NAMES = ["Cover_Type"]
@_deprecate_positional_args
def fetch_covtype(*, data_home=None, download_if_missing=True,
random_state=None, shuffle=False, return_X_y=False,
as_frame=False):
"""Load the covertype dataset (classification).
Download it if necessary.
================= ============
Classes 7
Samples total 581012
Dimensionality 54
Features int
================= ============
Read more in the :ref:`User Guide <covtype_dataset>`.
Parameters
----------
data_home : str, default=None
        Specify another download and cache folder for the datasets. By default
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : bool, default=True
        If False, raise an IOError if the data is not locally available
        instead of trying to download the data from the source site.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset shuffling. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
shuffle : bool, default=False
Whether to shuffle dataset.
return_X_y : bool, default=False
        If True, returns ``(data.data, data.target)`` instead of a Bunch
object.
.. versionadded:: 0.20
as_frame : bool, default=False
        If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric). The target is a pandas DataFrame or
Series depending on the number of target columns. If `return_X_y` is
        True, then (`data`, `target`) will be pandas DataFrames or Series as
described below.
.. versionadded:: 0.24
Returns
-------
dataset : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
        data : ndarray of shape (581012, 54)
Each row corresponds to the 54 features in the dataset.
target : ndarray of shape (581012,)
Each value corresponds to one of
the 7 forest covertypes with values
ranging between 1 to 7.
frame : dataframe of shape (581012, 53)
            Only present when `as_frame=True`. Contains `data` and `target`.
DESCR : str
Description of the forest covertype dataset.
feature_names : list
The names of the dataset columns.
target_names: list
The names of the target columns.
    (data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.20
"""
data_home = get_data_home(data_home=data_home)
covtype_dir = join(data_home, "covertype")
samples_path = _pkl_filepath(covtype_dir, "samples")
targets_path = _pkl_filepath(covtype_dir, "targets")
available = exists(samples_path)
if download_if_missing and not available:
if not exists(covtype_dir):
makedirs(covtype_dir)
logger.info("Downloading %s" % ARCHIVE.url)
archive_path = _fetch_remote(ARCHIVE, dirname=covtype_dir)
Xy = np.genfromtxt(GzipFile(filename=archive_path), delimiter=',')
# delete archive
remove(archive_path)
X = Xy[:, :-1]
y = Xy[:, -1].astype(np.int32, copy=False)
joblib.dump(X, samples_path, compress=9)
joblib.dump(y, targets_path, compress=9)
elif not available and not download_if_missing:
raise IOError("Data not found and `download_if_missing` is False")
try:
X, y
except NameError:
X = joblib.load(samples_path)
y = joblib.load(targets_path)
if shuffle:
ind = np.arange(X.shape[0])
rng = check_random_state(random_state)
rng.shuffle(ind)
X = X[ind]
y = y[ind]
module_path = dirname(__file__)
with open(join(module_path, 'descr', 'covtype.rst')) as rst_file:
fdescr = rst_file.read()
frame = None
if as_frame:
frame, X, y = _convert_data_dataframe(caller_name="fetch_covtype",
data=X,
target=y,
feature_names=FEATURE_NAMES,
target_names=TARGET_NAMES)
if return_X_y:
return X, y
return Bunch(data=X,
target=y,
frame=frame,
target_names=TARGET_NAMES,
feature_names=FEATURE_NAMES,
DESCR=fdescr)
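# A brief usage sketch (illustrative; assumes network access on the first call, which
# downloads and caches the archive under data_home):
if __name__ == "__main__":
    covtype = fetch_covtype(shuffle=True, random_state=0)
    print(covtype.data.shape, covtype.target.shape)  # (581012, 54) (581012,)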
| [
"[email protected]"
] | |
3e57cd7b2ce4397e6d921fac46ff9af95062da80 | 1adc05008f0caa9a81cc4fc3a737fcbcebb68995 | /hardhat/recipes/ocaml/coq.py | 9a02ca553f2d115228f920a99f746e35bcdf6ff9 | [
"MIT",
"BSD-3-Clause"
] | permissive | stangelandcl/hardhat | 4aa995518697d19b179c64751108963fa656cfca | 1ad0c5dec16728c0243023acb9594f435ef18f9c | refs/heads/master | 2021-01-11T17:19:41.988477 | 2019-03-22T22:18:44 | 2019-03-22T22:18:52 | 79,742,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | from ..base import GnuRecipe
class CoqRecipe(GnuRecipe):
def __init__(self, *args, **kwargs):
super(CoqRecipe, self).__init__(*args, **kwargs)
self.sha256 = '6e3c3cf5c8e2b0b760dc52738e2e849f' \
'3a8c630869659ecc0cf41413fcee81df'
self.name = 'coq'
self.version = '8.6'
self.depends = ['ocaml-findlib', 'ocaml-camlp5']
self.url = 'https://coq.inria.fr/distrib/V$version/files/' \
'coq-$version.tar.gz'
self.configure_args = self.shell_args + [
'configure',
'-prefix',
self.prefix_dir]
def need_configure(self):
return True
| [
"[email protected]"
] | |
001444d97273fc955bdf648d43cc6b89ce9194c9 | 3cd18a3e789d3a0739768f1ae848d9f74b9dbbe7 | /mounth001/day24/text/file_write_2.py | 57e6dfeb11a4a1e289915f4535f8acaa37be0ae6 | [] | no_license | Molly-l/66 | 4bfe2f93e726d3cc059222c93a2bb3460b21ad78 | fae24a968f590060522d30f1b278fcfcdab8b36f | refs/heads/master | 2020-09-28T12:50:18.590794 | 2019-11-27T04:42:28 | 2019-11-27T04:42:28 | 226,782,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | '''(r文件不存在报错,w,a会新建)w清空重写,a追加'''
f=open('w01.txt','ab')
# f.write('111')
bytes1='dfyt'.encode()  # convert the string to binary (bytes)
f.write(bytes1)
f.close()
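# A minimal read-back sketch (assuming w01.txt now exists from the append above):
# 'r' raises an error when the file is missing, while 'w' and 'a' create it;
# 'w' truncates and rewrites the file, 'a' appends to the end.
with open('w01.txt', 'rb') as check:
    print(check.read())  # grows by b'dfyt' on every run because mode 'ab' appends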
| [
"[email protected]"
] | |
a269f90cf7a7c85403e4d0201d5daf68e1d31eb9 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2652/60705/266739.py | 3f6d30e69a9b783eadfdca3eaeda2a8df1e79ad9 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 843 | py | [N, C, F] = list(map(int, input().split(" ")))
print("N:" + str(N))
print("C:" + str(C))
print("F:" + str(F))
grades = {}
prices = {}
for students in range(0, C):
[grade, price] = list(map(int, input().split(" ")))
grades[students] = grade
prices[students] = price
print(grades)
print(prices)
li = sorted(grades.items(), key=lambda k: k[1])
print(li)
index = C - 1
while index >= N-1:
p = 0
for i in range(index, index - N, -1):
print(index)
        p += prices[li[i][0]]  # use the loop index i, not the fixed window start
print("p="+str(p))
if p < F:
break
index -= 1
if index < N - 1:
print(-1)
else:
grad = []
for i in range(index, index-N, -1):
grad.append(grades[li[i][0]])
print(grad)
grad.sort()
if N % 2 == 1:
print(grad[N // 2])
else:
print(int((grad[N//2] + grad[N//2 - 1]) / 2))
| [
"[email protected]"
] | |
bc4300f15098e0e87232005f0927580177ee0c50 | 8d3b5db62ec817a81d97d2bf741166da20fecffc | /tensorforce/core/policies/action_value.py | 4b09436aff4b0089f8fc12aa9de65b535dfcc954 | [
"Apache-2.0"
] | permissive | stheid/tensorforce | 6ffe918054d1b298c0c4cf4de9a669d500c0983d | c4cce421be650d7500125b793b59aaeb92ffdf51 | refs/heads/master | 2022-11-23T19:15:39.336310 | 2020-06-11T16:04:08 | 2020-06-11T16:04:08 | 271,549,628 | 0 | 0 | Apache-2.0 | 2020-06-11T16:04:09 | 2020-06-11T13:12:16 | null | UTF-8 | Python | false | false | 5,112 | py | # Copyright 2018 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from collections import OrderedDict
import tensorflow as tf
from tensorforce import util
from tensorforce.core.policies import Policy
class ActionValue(Policy):
"""
Base class for action-value-based policies.
Args:
name (string): Module name
(<span style="color:#0000C0"><b>internal use</b></span>).
states_spec (specification): States specification
(<span style="color:#0000C0"><b>internal use</b></span>).
actions_spec (specification): Actions specification
(<span style="color:#0000C0"><b>internal use</b></span>).
device (string): Device name
(<span style="color:#00C000"><b>default</b></span>: inherit value of parent module).
summary_labels ('all' | iter[string]): Labels of summaries to record
(<span style="color:#00C000"><b>default</b></span>: inherit value of parent module).
l2_regularization (float >= 0.0): Scalar controlling L2 regularization
(<span style="color:#00C000"><b>default</b></span>: inherit value of parent module).
"""
def tf_act(self, states, internals, auxiliaries, return_internals):
assert return_internals
actions_values = self.actions_values(
states=states, internals=internals, auxiliaries=auxiliaries
)
actions = OrderedDict()
for name, spec, action_values in util.zip_items(self.actions_spec, actions_values):
actions[name] = tf.math.argmax(
input=action_values, axis=-1, output_type=util.tf_dtype(spec['type'])
)
return actions
def tf_states_value(
self, states, internals, auxiliaries, reduced=True, include_per_action=False
):
states_values = self.states_values(
states=states, internals=internals, auxiliaries=auxiliaries
)
for name, spec, states_value in util.zip_items(self.actions_spec, states_values):
states_values[name] = tf.reshape(
tensor=states_value, shape=(-1, util.product(xs=spec['shape']))
)
states_value = tf.concat(values=tuple(states_values.values()), axis=1)
if reduced:
states_value = tf.math.reduce_mean(input_tensor=states_value, axis=1)
if include_per_action:
for name in self.actions_spec:
states_values[name] = tf.math.reduce_mean(
input_tensor=states_values[name], axis=1
)
if include_per_action:
states_values['*'] = states_value
return states_values
else:
return states_value
def tf_actions_value(
self, states, internals, auxiliaries, actions, reduced=True, include_per_action=False
):
actions_values = self.actions_values(
states=states, internals=internals, auxiliaries=auxiliaries, actions=actions
)
for name, spec, actions_value in util.zip_items(self.actions_spec, actions_values):
actions_values[name] = tf.reshape(
tensor=actions_value, shape=(-1, util.product(xs=spec['shape']))
)
actions_value = tf.concat(values=tuple(actions_values.values()), axis=1)
if reduced:
actions_value = tf.math.reduce_mean(input_tensor=actions_value, axis=1)
if include_per_action:
for name in self.actions_spec:
actions_values[name] = tf.math.reduce_mean(
input_tensor=actions_values[name], axis=1
)
if include_per_action:
actions_values['*'] = actions_value
return actions_values
else:
return actions_value
def tf_states_values(self, states, internals, auxiliaries):
if not all(spec['type'] == 'int' for spec in self.actions_spec.values()):
raise NotImplementedError
actions_values = self.actions_values(
states=states, internals=internals, auxiliaries=auxiliaries
)
states_values = OrderedDict()
for name, spec, action_values in util.zip_items(self.actions_spec, actions_values):
states_values[name] = tf.math.reduce_max(input_tensor=action_values, axis=-1)
return states_values
def tf_actions_values(self, states, internals, auxiliaries, actions=None):
raise NotImplementedError
| [
"[email protected]"
] | |
10ca4009d3fee037182f4df9155038fb40fe9114 | 93fc75b62e3fb6524f3891daf58772175fee781c | /沈书颖/2017310398-沈书颖-第五次作业-金工17-1/2017310398-沈书颖-第五次作业-金工17-1/10-6 10-7.py | 4555a8454fea0638428be4e32b60903555d42d31 | [] | no_license | jingong171/jingong-homework | 13174a4a7b39b8ae6d5da103cbf0fb40766d59c1 | 542e8781f26676a62538714b92fb0bccdf41b47b | refs/heads/master | 2020-03-29T13:38:34.152280 | 2018-12-17T14:38:08 | 2018-12-17T14:38:08 | 149,974,131 | 8 | 11 | null | 2018-10-08T14:40:58 | 2018-09-23T10:32:35 | Python | UTF-8 | Python | false | false | 385 | py | while True:
    number_a=input("Please enter a number: ")
    number_b=input("Please enter another number: ")
    # prompt the user to enter two numbers
    try:
        answer=int(number_a)+int(number_b)
    except ValueError:
        print("Please enter numbers!")
    # if the input is not a number, show a reminder
    else:
        print(answer)
    # if both inputs are numbers, print the result
| [
"[email protected]"
] | |
33819073c9a333a74a0dd3cd161259efaae88da2 | 44e6e50a4d2e0095e055e59a6250b8ccf327f844 | /morepath/converter.py | a87ca28927e2ff16e91ad410496591cae56631d0 | [] | no_license | iapilgrim/morepath | 14f8f4d457fa9e9a5a2342f659e03e962209f5a0 | f173860661d00f2b3a684d2c512a1741d40cc26a | refs/heads/master | 2020-12-26T03:44:35.396492 | 2014-02-13T08:31:40 | 2014-02-13T08:31:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,214 | py | from reg.mapping import Map, ClassMapKey
class Converter(object):
"""A converter that knows how to decode from string to object and back.
Used for decoding/encoding URL parameters and path parameters.
"""
def __init__(self, decode, encode=None):
"""Create new converter.
        :param decode: function that, given a string, can decode it into an object.
        :param encode: function that, given an object, can encode it into a string.
"""
self.decode = decode
self.encode = encode or unicode
def __eq__(self, other):
return (self.decode is other.decode and
self.encode is other.encode)
def __ne__(self, other):
return not self == other
IDENTITY_CONVERTER = Converter(lambda s: s, lambda s: s)
class ConverterRegistry(object):
"""A registry for converters.
Used to decode/encode URL parameters and path variables used
by the :meth:`morepath.AppBase.path` directive.
Is aware of inheritance.
"""
def __init__(self):
self._map = Map()
def register_converter(self, type, converter):
"""Register a converter for type.
:param type: the Python type for which to register
the converter.
:param converter: a :class:`morepath.Converter` instance.
"""
self._map[ClassMapKey(type)] = converter
def converter_for_type(self, type):
"""Get converter for type.
Is aware of inheritance; if nothing is registered for given
type will return converter registered for base class.
:param type: The type for which to look up the converter.
:returns: a :class:`morepath.Converter` instance.
"""
return self._map.get(ClassMapKey(type))
def converter_for_value(self, v):
"""Get converter for value.
Is aware of inheritance; if nothing is registered for type of
given value will return converter registered for base class.
:param value: The value for which to look up the converter.
:returns: a :class:`morepath.Converter` instance.
"""
if v is None:
return IDENTITY_CONVERTER
return self.converter_for_type(type(v))
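if __name__ == '__main__':  # pragma: no cover
    # Illustrative usage sketch, not part of the original module: register a
    # decoder for int and look it up by value. Assumes the Python 2
    # environment this module targets (the default encoder is ``unicode``)
    # and that the ``reg`` dependency is installed.
    registry = ConverterRegistry()
    registry.register_converter(int, Converter(int))
    assert registry.converter_for_value(3).decode('7') == 7
    assert registry.converter_for_value(None) is IDENTITY_CONVERTER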
| [
"[email protected]"
] | |
8b82b13b6a0ec3017b98b71e4834d9de191a7e53 | 3712a929d1124f514ea7af1ac0d4a1de03bb6773 | /开班笔记/python数据分析机器学习部分/机器学习/day04/score.py | 03c4d24bda3a74f02be17657cf046736a6d8e985 | [] | no_license | jiyabing/learning | abd82aa3fd37310b4a98b11ea802c5b0e37b7ad9 | 6059006b0f86aee9a74cfc116d2284eb44173f41 | refs/heads/master | 2020-04-02T20:47:33.025331 | 2018-10-26T05:46:10 | 2018-10-26T05:46:10 | 154,779,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,678 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
import sklearn.cluster as sc
import sklearn.metrics as sm
import matplotlib.pyplot as mp
x = []
with open('../../data/perf.txt', 'r') as f:
for line in f.readlines():
data = [float(substr) for substr
in line.split(',')]
x.append(data)
x = np.array(x)
clstrs, scores, models = np.arange(2, 11), [], []
for n_clusters in clstrs:
model = sc.KMeans(init='k-means++',
n_clusters=n_clusters)
model.fit(x)
score = sm.silhouette_score(
x, model.labels_, sample_size=len(x),
metric='euclidean')
scores.append(score)
models.append(model)
scores = np.array(scores)
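# silhouette_score lies in [-1, 1]; larger values mean tighter, better
# separated clusters, so argmax below picks the cluster count with the best
# average silhouette.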
best_index = scores.argmax()
best_clstr = clstrs[best_index]
print(best_clstr)
best_score = scores[best_index]
print(best_score)
best_model = models[best_index]
centers = best_model.cluster_centers_
l, r, h = x[:, 0].min() - 1, x[:, 0].max() + 1, 0.005
b, t, v = x[:, 1].min() - 1, x[:, 1].max() + 1, 0.005
grid_x = np.meshgrid(np.arange(l, r, h),
np.arange(b, t, v))
flat_x = np.c_[grid_x[0].ravel(), grid_x[1].ravel()]
flat_y = best_model.predict(flat_x)
grid_y = flat_y.reshape(grid_x[0].shape)
pred_y = best_model.predict(x)
mp.figure('K-Means Cluster', facecolor='lightgray')
mp.title('K-Means Cluster', fontsize=20)
mp.xlabel('x', fontsize=14)
mp.ylabel('y', fontsize=14)
mp.tick_params(labelsize=10)
mp.pcolormesh(grid_x[0], grid_x[1], grid_y, cmap='gray')
mp.scatter(x[:, 0], x[:, 1], c=pred_y, cmap='brg', s=60)
mp.scatter(centers[:, 0], centers[:, 1], marker='+',
c='gold', s=1000, linewidth=1)
mp.show()
| [
"[email protected]"
] | |
cfc990e4788553a5ae0cb0ae98b5d5f6046e27fa | 0670d89e5d7b91d86b181e0b6cfdbef8b3b9e9e6 | /p2/api/migrations/0001_initial.py | 59146617b32f664e13e38f1dec9edb666ee6eefe | [
"MIT"
] | permissive | BeryJu/p2 | dfe570afb420843033e519350f5b89e992878a6b | 80b5c6a821f90cef73d6e8cd3c6cdb05ffa86b27 | refs/heads/master | 2020-12-04T01:21:21.197822 | 2019-08-29T16:02:21 | 2019-08-29T16:02:21 | 231,549,415 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,347 | py | # Generated by Django 2.2 on 2019-05-03 17:56
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
import p2.api.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('p2_core', '0017_auto_20190503_1755'),
]
operations = [
migrations.CreateModel(
name='APIKey',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField()),
('access_key', models.CharField(default=p2.api.models.get_access_key, max_length=20)),
('secret_key', models.CharField(default=p2.api.models.get_secret_key, max_length=40)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('volume', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='p2_core.Volume')),
],
options={
'verbose_name': 'API Key',
'verbose_name_plural': 'API Keys',
'unique_together': {('access_key', 'secret_key')},
},
),
]
| [
"[email protected]"
] | |
87ed7b1213ff1c5ac14a3b8964f9b62568a734e3 | 350ade9361645f87d96589a0c90c76d8a951832b | /CP4/ICCP4_9.py | d2b59b71c9a9c7a4c57f25ad84303e7d47c670d7 | [] | no_license | dongzeyuan/Practise | becf7c7ca15928213aa22ae15bd8b3f1f9b7dc8b | ecef4466d30c5c9e88e766b4f3df6db24959b9d3 | refs/heads/master | 2021-09-21T02:06:24.629708 | 2018-08-19T08:50:02 | 2018-08-19T08:50:02 | 119,028,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | # coding=UTF-8
for i in range(1,5):
print('*'*7)
print('\n')
for i in range(1, 5):
print('*'*(2*i-1))
print('\n') | [
"[email protected]"
] | |
b02551e689bd26380fb3d8e7424522b62aed0b35 | e35eb92b5ab6547119585004b9eea3cafe948050 | /efsw/conversion/tests/test_views.py | dd84ced35fd62ff5dd91a71f7d2cc9ee4053880a | [] | no_license | einsfr/mmkit | 0a084db85b2cf5ba268e692676095d768733f387 | f12bc2f83254a3123e02abdc105816cc04c438b5 | refs/heads/master | 2020-12-31T05:56:19.287611 | 2016-06-10T05:56:58 | 2016-06-10T05:56:58 | 29,473,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,127 | py | import os
import math
from django.test import TestCase
from django.core import urlresolvers, paginator
from mmkit.conf import settings
from efsw.conversion import models
from efsw.common.utils.testcases import LoginRequiredTestCase, JsonResponseTestCase
from efsw.conversion.fixtures import conversionprofile, conversiontask
from efsw.storage.models import FileStorage
class TaskListTestCase(TestCase):
def test_list(self):
response = self.client.get(urlresolvers.reverse('efsw.conversion:task:list'))
short_count = getattr(settings, 'EFSW_CONVERTER_TASKS_SHORT_LIST_COUNT')
finished_count = models.ConversionTask.objects.filter(
status__in=[models.ConversionTask.STATUS_ERROR, models.ConversionTask.STATUS_COMPLETED,
models.ConversionTask.STATUS_CANCELED]
).count()
finished_count = finished_count if finished_count < short_count else short_count
self.assertEqual(len(response.context['tasks_finished']), finished_count)
self.assertTrue(response.context['tasks_finished_more'] or finished_count <= short_count)
unknown_count = models.ConversionTask.objects.filter(
status=models.ConversionTask.STATUS_UNKNOWN
).count()
unknown_count = unknown_count if unknown_count < short_count else short_count
self.assertEqual(len(response.context['tasks_unknown']), unknown_count)
self.assertTrue(response.context['tasks_unknown_more'] or unknown_count <= short_count)
in_progress_count = models.ConversionTask.objects.filter(
status__in=[models.ConversionTask.STATUS_START_WAITING, models.ConversionTask.STATUS_IN_PROGRESS,
models.ConversionTask.STATUS_STARTED]
).count()
in_progress_count = in_progress_count if in_progress_count < short_count else short_count
self.assertEqual(len(response.context['tasks_in_progress']), in_progress_count)
self.assertTrue(response.context['tasks_in_progress_more'] or in_progress_count <= short_count)
enqueued_count = models.ConversionTask.objects.filter(status=models.ConversionTask.STATUS_ENQUEUED).count()
enqueued_count = enqueued_count if enqueued_count < short_count else short_count
self.assertEqual(len(response.context['tasks_enqueued']), enqueued_count)
self.assertTrue(response.context['tasks_enqueued_more'] or enqueued_count <= short_count)
def test_list_finished(self):
response = self.client.get(urlresolvers.reverse('efsw.conversion:task:list_finished'))
self.assertEqual(
len(response.context['tasks_finished']),
models.ConversionTask.objects.filter(
status__in=[models.ConversionTask.STATUS_ERROR, models.ConversionTask.STATUS_COMPLETED,
models.ConversionTask.STATUS_CANCELED]
).count()
)
def test_list_unknown(self):
response = self.client.get(urlresolvers.reverse('efsw.conversion:task:list_unknown'))
self.assertEqual(
len(response.context['tasks_unknown']),
models.ConversionTask.objects.filter(
status=models.ConversionTask.STATUS_UNKNOWN
).count()
)
def test_list_in_progress(self):
response = self.client.get(urlresolvers.reverse('efsw.conversion:task:list_in_progress'))
self.assertEqual(
len(response.context['tasks_in_progress']),
models.ConversionTask.objects.filter(
status__in=[models.ConversionTask.STATUS_START_WAITING, models.ConversionTask.STATUS_IN_PROGRESS,
models.ConversionTask.STATUS_STARTED]
).count()
)
def test_list_enqueued(self):
response = self.client.get(urlresolvers.reverse('efsw.conversion:task:list_enqueued'))
self.assertEqual(
len(response.context['tasks_enqueued']),
models.ConversionTask.objects.filter(status=models.ConversionTask.STATUS_ENQUEUED).count()
)
class TaskNewTestCase(LoginRequiredTestCase):
def test_variables(self):
self._login_user()
response = self.client.get(urlresolvers.reverse('efsw.conversion:task:new'))
self.assertIn('form', response.context)
self.assertIn('input_formset', response.context)
self.assertIn('output_formset', response.context)
class TaskCreateJsonTestCase(LoginRequiredTestCase, JsonResponseTestCase):
fixtures = ['filestorage.json']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.url = urlresolvers.reverse('efsw.conversion:task:create_json')
def test_wrong_method(self):
self._login_user()
response = self.client.get(self.url)
self.assertEqual(405, response.status_code)
def test_invalid_form(self):
self._login_user()
response = self.client.post(self.url)
self.assertJsonError(response, 'FORM_INVALID')
def test_invalid_formset(self):
conversionprofile.load_data()
self._login_user()
data = {
            'name': 'Test name',
'profile': 1
}
response = self.client.post(self.url, data)
self.assertJsonError(response, 'FORMSET_ERROR')
data = {
            'name': 'Test name',
'profile': 1,
'inputs-TOTAL_FORMS': 1,
'inputs-INITIAL_FORMS': 1,
'inputs-MIN_NUM_FORMS': 1,
'inputs-MAX_NUM_FORMS': 1,
'outputs-TOTAL_FORMS': 1,
'outputs-INITIAL_FORMS': 1,
'outputs-MIN_NUM_FORMS': 1,
'outputs-MAX_NUM_FORMS': 1,
}
response = self.client.post(self.url, data)
self.assertJsonError(response, 'FORMSET_INVALID')
errors = self.get_json_errors(response)
self.assertIn('inputs', errors)
self.assertIn('outputs', errors)
def test_valid(self):
conversionprofile.load_data()
self._login_user()
FileStorage.initiate_storage_root(FileStorage.INIT_MODE_SKIP_IF_EXISTS)
fs_in = FileStorage.objects.get(pk='1ac9873a-8cf0-49e1-8a9a-7709930aa8af')
fs_in.initiate_storage_base(FileStorage.INIT_MODE_SKIP_IF_EXISTS)
fs_path = fs_in.get_base_path()
open(os.path.join(fs_path, 'in'), 'w').close()
fs_out = FileStorage.objects.get(pk='11e00904-0f3d-4bfa-a3a9-9c716f87bc01')
fs_out.initiate_storage_base(FileStorage.INIT_MODE_SKIP_IF_EXISTS)
fs_path = fs_out.get_base_path()
try:
os.remove(os.path.join(fs_path, 'out'))
except FileNotFoundError:
pass
data = {
            'name': 'Test name',
'profile': 1,
'inputs-TOTAL_FORMS': 1,
'inputs-INITIAL_FORMS': 1,
'inputs-MIN_NUM_FORMS': 1,
'inputs-MAX_NUM_FORMS': 1,
'outputs-TOTAL_FORMS': 1,
'outputs-INITIAL_FORMS': 1,
'outputs-MIN_NUM_FORMS': 1,
'outputs-MAX_NUM_FORMS': 1,
'inputs-0-storage': '1ac9873a-8cf0-49e1-8a9a-7709930aa8af',
'inputs-0-path': 'in',
'outputs-0-storage': '11e00904-0f3d-4bfa-a3a9-9c716f87bc01',
'outputs-0-path': 'out',
}
response = self.client.post(self.url, data)
self.assertJsonOk(response)
class TaskShowTestCase(TestCase):
def setUp(self):
conversiontask.load_data()
def test_404(self):
response = self.client.get(urlresolvers.reverse(
'efsw.conversion:task:show', args=('e0593092-fbc5-4b20-10f4-677f8954220f', ))
)
self.assertEqual(404, response.status_code)
def test_normal(self):
response = self.client.get(urlresolvers.reverse(
'efsw.conversion:task:show', args=('e0593092-fbc5-4b20-99f4-677f8954220f', ))
)
self.assertEqual(200, response.status_code)
self.assertIn('task', response.context)
class ProfileListTestCase(TestCase):
def setUp(self):
conversionprofile.load_data()
def test_list(self):
response = self.client.get(urlresolvers.reverse('efsw.conversion:profile:list'))
self.assertIsInstance(response.context['profiles'], paginator.Page)
page_count = math.ceil(
models.ConversionProfile.objects.count() / getattr(settings, 'EFSW_CONVERTER_PROFILES_PER_PAGE')
)
self.assertEqual(1, response.context['profiles'].number)
self.assertEqual(page_count, response.context['profiles'].paginator.num_pages)
class ProfileShowTestCase(TestCase):
def setUp(self):
conversionprofile.load_data()
def test_404(self):
response = self.client.get(urlresolvers.reverse('efsw.conversion:profile:show', args=(1000000, )))
self.assertEqual(404, response.status_code)
def test_normal(self):
response = self.client.get(urlresolvers.reverse('efsw.conversion:profile:show', args=(1, )))
self.assertEqual(200, response.status_code)
self.assertIn('profile', response.context)
class ProfileNewTestCase(LoginRequiredTestCase):
def test_variables(self):
self._login_user()
response = self.client.get(urlresolvers.reverse('efsw.conversion:profile:new'))
self.assertIn('form', response.context)
self.assertIn('input_formset', response.context)
self.assertIn('output_formset', response.context)
class ProfileCreateJsonTestCase(LoginRequiredTestCase, JsonResponseTestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.url = urlresolvers.reverse('efsw.conversion:profile:create_json')
def test_wrong_method(self):
self._login_user()
response = self.client.get(self.url)
self.assertEqual(405, response.status_code)
def test_invalid_form(self):
self._login_user()
response = self.client.post(self.url)
self.assertJsonError(response, 'FORM_INVALID')
def test_invalid_formset(self):
self._login_user()
data = {
            'name': 'Test profile'
}
response = self.client.post(self.url, data)
self.assertJsonError(response, 'FORMSET_ERROR')
data = {
            'name': 'Test name',
'inputs-TOTAL_FORMS': 1,
'inputs-INITIAL_FORMS': 1,
'inputs-MIN_NUM_FORMS': 1,
'inputs-MAX_NUM_FORMS': 1,
'outputs-TOTAL_FORMS': 1,
'outputs-INITIAL_FORMS': 1,
'outputs-MIN_NUM_FORMS': 1,
'outputs-MAX_NUM_FORMS': 1,
}
response = self.client.post(self.url, data)
self.assertJsonOk(response)
        profile = models.ConversionProfile.objects.get(name='Test name')
self.assertEqual(1, len(profile.args_builder.inputs))
self.assertEqual(1, len(profile.args_builder.outputs))
class ProfileShowJson(JsonResponseTestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.url = urlresolvers.reverse('efsw.conversion:profile:show_json')
def setUp(self):
conversionprofile.load_data()
def test_invalid(self):
response = self.client.get(self.url)
self.assertJsonError(response, 'REQUIRED_REQUEST_PARAMETER_IS_MISSING')
response = self.client.get('{0}?id=abc'.format(self.url))
self.assertJsonError(response, 'UNEXPECTED_REQUEST_PARAMETER_VALUE')
response = self.client.get('{0}?id=1000000'.format(self.url))
self.assertJsonError(response, 'PROFILE_NOT_FOUND')
def test_valid(self):
response = self.client.get('{0}?id=1'.format(self.url))
self.assertJsonOk(response)
data = self.get_json_data(response)
self.assertIn('name', data)
self.assertIn('description', data)
self.assertIn('inputs', data)
self.assertIn('outputs', data)
self.assertEqual(1, len(data['inputs']))
self.assertEqual(1, len(data['outputs']))
| [
"[email protected]"
] | |
5d3ce6d2880621f8511da0cf83c66ca2803270cd | 0c85e44f3faee190bf7ec4893a287b60fa8afa93 | /parse_questions.py | fbb10a0ff08dfc01707aa74a96455bedb5573d14 | [
"MIT"
] | permissive | nthmost/rheti-python | 8286124a9473413b7c840e45c6647174ae1b2aee | 4c862f480e571bea7ec721f7a488e92e85839346 | refs/heads/master | 2020-12-30T12:55:46.613198 | 2017-05-16T19:22:13 | 2017-05-16T19:22:13 | 91,373,467 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | from csv import DictReader
dr = DictReader(open('questions.csv'))
for item in dr:
print(item)
| [
"[email protected]"
] | |
63e7e972aa5a64516677fb0bba286c6a6bbfff36 | eaa59c237e86432d8ae4366823a0b0dfa3d789d3 | /bookwyrm/tests/models/test_fields.py | 10c674d9979cd01533928b0a9cdf9c7cb87c483a | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | ahstro/bookwyrm | 769b688eb1fcccd54f866a3d603d729100b5197b | e0d8e49d57d7c700a9d64559c7d8a14f7f08983e | refs/heads/main | 2023-02-05T01:02:34.275970 | 2020-12-24T17:36:26 | 2020-12-24T17:36:26 | 324,860,126 | 0 | 0 | NOASSERTION | 2020-12-27T22:26:38 | 2020-12-27T22:26:37 | null | UTF-8 | Python | false | false | 17,334 | py | ''' testing models '''
from io import BytesIO
from collections import namedtuple
from dataclasses import dataclass
import json
import pathlib
import re
from typing import List
from unittest.mock import patch
from PIL import Image
import responses
from django.core.exceptions import ValidationError
from django.core.files.base import ContentFile
from django.db import models
from django.test import TestCase
from django.utils import timezone
from bookwyrm.activitypub.base_activity import ActivityObject
from bookwyrm.models import fields, User, Status
from bookwyrm.models.base_model import ActivitypubMixin, BookWyrmModel
#pylint: disable=too-many-public-methods
class ActivitypubFields(TestCase):
    ''' overwrites standard model fields to work with activitypub '''
def test_validate_remote_id(self):
''' should look like a url '''
self.assertIsNone(fields.validate_remote_id('http://www.example.com'))
self.assertIsNone(fields.validate_remote_id('https://www.example.com'))
self.assertIsNone(fields.validate_remote_id('http://exle.com/dlg-23/x'))
self.assertRaises(
ValidationError, fields.validate_remote_id,
'http:/example.com/dlfjg-23/x')
self.assertRaises(
ValidationError, fields.validate_remote_id,
'www.example.com/dlfjg-23/x')
self.assertRaises(
ValidationError, fields.validate_remote_id,
'http://www.example.com/dlfjg 23/x')
def test_activitypub_field_mixin(self):
''' generic mixin with super basic to and from functionality '''
instance = fields.ActivitypubFieldMixin()
self.assertEqual(instance.field_to_activity('fish'), 'fish')
self.assertEqual(instance.field_from_activity('fish'), 'fish')
self.assertFalse(instance.deduplication_field)
instance = fields.ActivitypubFieldMixin(
activitypub_wrapper='endpoints', activitypub_field='outbox'
)
self.assertEqual(
instance.field_to_activity('fish'),
{'outbox': 'fish'}
)
self.assertEqual(
instance.field_from_activity({'outbox': 'fish'}),
'fish'
)
self.assertEqual(instance.get_activitypub_field(), 'endpoints')
instance = fields.ActivitypubFieldMixin()
instance.name = 'snake_case_name'
self.assertEqual(instance.get_activitypub_field(), 'snakeCaseName')
def test_set_field_from_activity(self):
''' setter from entire json blob '''
@dataclass
class TestModel:
''' real simple mock '''
field_name: str
mock_model = TestModel(field_name='bip')
TestActivity = namedtuple('test', ('fieldName', 'unrelated'))
data = TestActivity(fieldName='hi', unrelated='bfkjh')
instance = fields.ActivitypubFieldMixin()
instance.name = 'field_name'
instance.set_field_from_activity(mock_model, data)
self.assertEqual(mock_model.field_name, 'hi')
def test_set_activity_from_field(self):
''' set json field given entire model '''
@dataclass
class TestModel:
''' real simple mock '''
field_name: str
unrelated: str
mock_model = TestModel(field_name='bip', unrelated='field')
instance = fields.ActivitypubFieldMixin()
instance.name = 'field_name'
data = {}
instance.set_activity_from_field(data, mock_model)
self.assertEqual(data['fieldName'], 'bip')
def test_remote_id_field(self):
''' just sets some defaults on charfield '''
instance = fields.RemoteIdField()
self.assertEqual(instance.max_length, 255)
self.assertTrue(instance.deduplication_field)
with self.assertRaises(ValidationError):
instance.run_validators('http://www.example.com/dlfjg 23/x')
def test_username_field(self):
''' again, just setting defaults on username field '''
instance = fields.UsernameField()
self.assertEqual(instance.activitypub_field, 'preferredUsername')
self.assertEqual(instance.max_length, 150)
self.assertEqual(instance.unique, True)
with self.assertRaises(ValidationError):
instance.run_validators('one two')
instance.run_validators('a*&')
instance.run_validators('trailingwhite ')
self.assertIsNone(instance.run_validators('aksdhf'))
self.assertEqual(instance.field_to_activity('[email protected]'), 'test')
def test_privacy_field_defaults(self):
''' post privacy field's many default values '''
instance = fields.PrivacyField()
self.assertEqual(instance.max_length, 255)
self.assertEqual(
[c[0] for c in instance.choices],
['public', 'unlisted', 'followers', 'direct'])
self.assertEqual(instance.default, 'public')
self.assertEqual(
instance.public, 'https://www.w3.org/ns/activitystreams#Public')
def test_privacy_field_set_field_from_activity(self):
''' translate between to/cc fields and privacy '''
@dataclass(init=False)
class TestActivity(ActivityObject):
''' real simple mock '''
to: List[str]
cc: List[str]
id: str = 'http://hi.com'
type: str = 'Test'
class TestPrivacyModel(ActivitypubMixin, BookWyrmModel):
''' real simple mock model because BookWyrmModel is abstract '''
privacy_field = fields.PrivacyField()
mention_users = fields.TagField(User)
user = fields.ForeignKey(User, on_delete=models.CASCADE)
public = 'https://www.w3.org/ns/activitystreams#Public'
data = TestActivity(
to=[public],
cc=['bleh'],
)
model_instance = TestPrivacyModel(privacy_field='direct')
self.assertEqual(model_instance.privacy_field, 'direct')
instance = fields.PrivacyField()
instance.name = 'privacy_field'
instance.set_field_from_activity(model_instance, data)
self.assertEqual(model_instance.privacy_field, 'public')
data.to = ['bleh']
data.cc = []
instance.set_field_from_activity(model_instance, data)
self.assertEqual(model_instance.privacy_field, 'direct')
data.to = ['bleh']
data.cc = [public, 'waah']
instance.set_field_from_activity(model_instance, data)
self.assertEqual(model_instance.privacy_field, 'unlisted')
def test_privacy_field_set_activity_from_field(self):
''' translate between to/cc fields and privacy '''
user = User.objects.create_user(
'rat', '[email protected]', 'ratword', local=True)
public = 'https://www.w3.org/ns/activitystreams#Public'
followers = '%s/followers' % user.remote_id
instance = fields.PrivacyField()
instance.name = 'privacy_field'
model_instance = Status.objects.create(user=user, content='hi')
activity = {}
instance.set_activity_from_field(activity, model_instance)
self.assertEqual(activity['to'], [public])
self.assertEqual(activity['cc'], [followers])
model_instance = Status.objects.create(user=user, privacy='unlisted')
activity = {}
instance.set_activity_from_field(activity, model_instance)
self.assertEqual(activity['to'], [followers])
self.assertEqual(activity['cc'], [public])
model_instance = Status.objects.create(user=user, privacy='followers')
activity = {}
instance.set_activity_from_field(activity, model_instance)
self.assertEqual(activity['to'], [followers])
self.assertEqual(activity['cc'], [])
model_instance = Status.objects.create(
user=user,
privacy='direct',
)
model_instance.mention_users.set([user])
activity = {}
instance.set_activity_from_field(activity, model_instance)
self.assertEqual(activity['to'], [user.remote_id])
self.assertEqual(activity['cc'], [])
def test_foreign_key(self):
''' should be able to format a related model '''
instance = fields.ForeignKey('User', on_delete=models.CASCADE)
Serializable = namedtuple('Serializable', ('to_activity', 'remote_id'))
item = Serializable(lambda: {'a': 'b'}, 'https://e.b/c')
# returns the remote_id field of the related object
self.assertEqual(instance.field_to_activity(item), 'https://e.b/c')
@responses.activate
def test_foreign_key_from_activity_str(self):
''' create a new object from a foreign key '''
instance = fields.ForeignKey(User, on_delete=models.CASCADE)
datafile = pathlib.Path(__file__).parent.joinpath(
'../data/ap_user.json')
userdata = json.loads(datafile.read_bytes())
# don't try to load the user icon
del userdata['icon']
# it shouldn't match with this unrelated user:
unrelated_user = User.objects.create_user(
'rat', '[email protected]', 'ratword', local=True)
# test receiving an unknown remote id and loading data
responses.add(
responses.GET,
'https://example.com/user/mouse',
json=userdata,
status=200)
with patch('bookwyrm.models.user.set_remote_server.delay'):
value = instance.field_from_activity(
'https://example.com/user/mouse')
self.assertIsInstance(value, User)
self.assertNotEqual(value, unrelated_user)
self.assertEqual(value.remote_id, 'https://example.com/user/mouse')
self.assertEqual(value.name, 'MOUSE?? MOUSE!!')
def test_foreign_key_from_activity_dict(self):
        ''' test receiving activity json '''
instance = fields.ForeignKey(User, on_delete=models.CASCADE)
datafile = pathlib.Path(__file__).parent.joinpath(
'../data/ap_user.json')
userdata = json.loads(datafile.read_bytes())
# don't try to load the user icon
del userdata['icon']
# it shouldn't match with this unrelated user:
unrelated_user = User.objects.create_user(
'rat', '[email protected]', 'ratword', local=True)
with patch('bookwyrm.models.user.set_remote_server.delay'):
value = instance.field_from_activity(userdata)
self.assertIsInstance(value, User)
self.assertNotEqual(value, unrelated_user)
self.assertEqual(value.remote_id, 'https://example.com/user/mouse')
self.assertEqual(value.name, 'MOUSE?? MOUSE!!')
# et cetera but we're not testing serializing user json
def test_foreign_key_from_activity_dict_existing(self):
''' test receiving a dict of an existing object in the db '''
instance = fields.ForeignKey(User, on_delete=models.CASCADE)
datafile = pathlib.Path(__file__).parent.joinpath(
'../data/ap_user.json'
)
userdata = json.loads(datafile.read_bytes())
user = User.objects.create_user(
'mouse', '[email protected]', 'mouseword', local=True)
user.remote_id = 'https://example.com/user/mouse'
user.save()
User.objects.create_user(
'rat', '[email protected]', 'ratword', local=True)
value = instance.field_from_activity(userdata)
self.assertEqual(value, user)
def test_foreign_key_from_activity_str_existing(self):
''' test receiving a remote id of an existing object in the db '''
instance = fields.ForeignKey(User, on_delete=models.CASCADE)
user = User.objects.create_user(
'mouse', '[email protected]', 'mouseword', local=True)
User.objects.create_user(
'rat', '[email protected]', 'ratword', local=True)
value = instance.field_from_activity(user.remote_id)
self.assertEqual(value, user)
def test_one_to_one_field(self):
''' a gussied up foreign key '''
instance = fields.OneToOneField('User', on_delete=models.CASCADE)
Serializable = namedtuple('Serializable', ('to_activity', 'remote_id'))
item = Serializable(lambda: {'a': 'b'}, 'https://e.b/c')
self.assertEqual(instance.field_to_activity(item), {'a': 'b'})
def test_many_to_many_field(self):
''' lists! '''
instance = fields.ManyToManyField('User')
Serializable = namedtuple('Serializable', ('to_activity', 'remote_id'))
Queryset = namedtuple('Queryset', ('all', 'instance'))
item = Serializable(lambda: {'a': 'b'}, 'https://e.b/c')
another_item = Serializable(lambda: {}, 'example.com')
items = Queryset(lambda: [item], another_item)
self.assertEqual(instance.field_to_activity(items), ['https://e.b/c'])
instance = fields.ManyToManyField('User', link_only=True)
instance.name = 'snake_case'
self.assertEqual(
instance.field_to_activity(items),
'example.com/snake_case'
)
@responses.activate
def test_many_to_many_field_from_activity(self):
''' resolve related fields for a list, takes a list of remote ids '''
instance = fields.ManyToManyField(User)
datafile = pathlib.Path(__file__).parent.joinpath(
'../data/ap_user.json'
)
userdata = json.loads(datafile.read_bytes())
# don't try to load the user icon
del userdata['icon']
# test receiving an unknown remote id and loading data
responses.add(
responses.GET,
'https://example.com/user/mouse',
json=userdata,
status=200)
with patch('bookwyrm.models.user.set_remote_server.delay'):
value = instance.field_from_activity(
['https://example.com/user/mouse', 'bleh']
)
self.assertIsInstance(value, list)
self.assertEqual(len(value), 1)
self.assertIsInstance(value[0], User)
def test_tag_field(self):
''' a special type of many to many field '''
instance = fields.TagField('User')
Serializable = namedtuple(
'Serializable',
('to_activity', 'remote_id', 'name_field', 'name')
)
Queryset = namedtuple('Queryset', ('all', 'instance'))
item = Serializable(
lambda: {'a': 'b'}, 'https://e.b/c', 'name', 'Name')
another_item = Serializable(
lambda: {}, 'example.com', '', '')
items = Queryset(lambda: [item], another_item)
result = instance.field_to_activity(items)
self.assertIsInstance(result, list)
self.assertEqual(len(result), 1)
self.assertEqual(result[0].href, 'https://e.b/c')
self.assertEqual(result[0].name, 'Name')
self.assertEqual(result[0].type, 'Serializable')
def test_tag_field_from_activity(self):
''' loadin' a list of items from Links '''
# TODO
@responses.activate
def test_image_field(self):
''' storing images '''
user = User.objects.create_user(
'mouse', '[email protected]', 'mouseword', local=True)
image_file = pathlib.Path(__file__).parent.joinpath(
'../../static/images/default_avi.jpg')
image = Image.open(image_file)
output = BytesIO()
image.save(output, format=image.format)
user.avatar.save(
'test.jpg',
ContentFile(output.getvalue())
)
output = fields.image_serializer(user.avatar, alt='alt text')
self.assertIsNotNone(
re.match(
r'.*\.jpg',
output.url,
)
)
self.assertEqual(output.name, 'alt text')
self.assertEqual(output.type, 'Image')
instance = fields.ImageField()
output = fields.image_serializer(user.avatar, alt=None)
self.assertEqual(instance.field_to_activity(user.avatar), output)
responses.add(
responses.GET,
'http://www.example.com/image.jpg',
body=user.avatar.file.read(),
status=200)
loaded_image = instance.field_from_activity(
'http://www.example.com/image.jpg')
self.assertIsInstance(loaded_image, list)
self.assertIsInstance(loaded_image[1], ContentFile)
def test_datetime_field(self):
''' this one is pretty simple, it just has to use isoformat '''
instance = fields.DateTimeField()
now = timezone.now()
self.assertEqual(instance.field_to_activity(now), now.isoformat())
self.assertEqual(
instance.field_from_activity(now.isoformat()), now
)
self.assertEqual(instance.field_from_activity('bip'), None)
def test_array_field(self):
''' idk why it makes them strings but probably for a good reason '''
instance = fields.ArrayField(fields.IntegerField)
self.assertEqual(instance.field_to_activity([0, 1]), ['0', '1'])
def test_html_field(self):
''' sanitizes html, the sanitizer has its own tests '''
instance = fields.HtmlField()
self.assertEqual(
instance.field_from_activity('<marquee><p>hi</p></marquee>'),
'<p>hi</p>'
)
| [
"[email protected]"
] | |
4c4723be440d412ac4176642f843b0e75a6b5236 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-network/azure/mgmt/network/v2018_10_01/operations/virtual_networks_operations.py | 9d1a9d853f168e10bc7ef04d431ed10bedfb5e5d | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 31,265 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class VirtualNetworksOperations(object):
"""VirtualNetworksOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2018-10-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-10-01"
self.config = config
def _delete_initial(
self, resource_group_name, virtual_network_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, virtual_network_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the specified virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'}
def get(
self, resource_group_name, virtual_network_name, expand=None, custom_headers=None, raw=False, **operation_config):
"""Gets the specified virtual network by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param expand: Expands referenced resources.
:type expand: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: VirtualNetwork or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2018_10_01.models.VirtualNetwork or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetwork', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'}
def _create_or_update_initial(
self, resource_group_name, virtual_network_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'VirtualNetwork')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetwork', response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetwork', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, virtual_network_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates or updates a virtual network in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param parameters: Parameters supplied to the create or update virtual
network operation
:type parameters:
~azure.mgmt.network.v2018_10_01.models.VirtualNetwork
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns VirtualNetwork or
ClientRawResponse<VirtualNetwork> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_10_01.models.VirtualNetwork]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_10_01.models.VirtualNetwork]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VirtualNetwork', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'}
def _update_tags_initial(
self, resource_group_name, virtual_network_name, tags=None, custom_headers=None, raw=False, **operation_config):
parameters = models.TagsObject(tags=tags)
# Construct URL
url = self.update_tags.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'TagsObject')
# Construct and send request
request = self._client.patch(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetwork', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update_tags(
self, resource_group_name, virtual_network_name, tags=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""Updates a virtual network tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns VirtualNetwork or
ClientRawResponse<VirtualNetwork> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_10_01.models.VirtualNetwork]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_10_01.models.VirtualNetwork]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
tags=tags,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VirtualNetwork', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'}
def list_all(
self, custom_headers=None, raw=False, **operation_config):
"""Gets all virtual networks in a subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of VirtualNetwork
:rtype:
~azure.mgmt.network.v2018_10_01.models.VirtualNetworkPaged[~azure.mgmt.network.v2018_10_01.models.VirtualNetwork]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_all.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.VirtualNetworkPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.VirtualNetworkPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualNetworks'}
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all virtual networks in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of VirtualNetwork
:rtype:
~azure.mgmt.network.v2018_10_01.models.VirtualNetworkPaged[~azure.mgmt.network.v2018_10_01.models.VirtualNetwork]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.VirtualNetworkPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.VirtualNetworkPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks'}
def check_ip_address_availability(
self, resource_group_name, virtual_network_name, ip_address, custom_headers=None, raw=False, **operation_config):
"""Checks whether a private IP address is available for use.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param ip_address: The private IP address to be verified.
:type ip_address: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: IPAddressAvailabilityResult or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.network.v2018_10_01.models.IPAddressAvailabilityResult or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.check_ip_address_availability.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['ipAddress'] = self._serialize.query("ip_address", ip_address, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('IPAddressAvailabilityResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
check_ip_address_availability.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/CheckIPAddressAvailability'}
def list_usage(
self, resource_group_name, virtual_network_name, custom_headers=None, raw=False, **operation_config):
"""Lists usage stats.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of VirtualNetworkUsage
:rtype:
~azure.mgmt.network.v2018_10_01.models.VirtualNetworkUsagePaged[~azure.mgmt.network.v2018_10_01.models.VirtualNetworkUsage]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_usage.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.VirtualNetworkUsagePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.VirtualNetworkUsagePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_usage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/usages'}
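    # The list operations above return paged iterators that can be consumed directly; a
    # brief sketch, reusing the assumed client from the update_tags note:
    #
    #   for vnet in network_client.virtual_networks.list_all():
    #       print(vnet.name, vnet.location)
    #   usages = list(network_client.virtual_networks.list_usage('my-resource-group', 'my-vnet'))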
| [
"[email protected]"
] | |
4b54bbdeef19d909ddd5bef71dfe0625ab46195f | 9f930df50f28e6cbc74089057fb4418460a7f657 | /regsoft/migrations/0001_initial.py | 494f8a33ccdd12c1f25b1bc6e07ef0d7edadaeb4 | [
"MIT"
] | permissive | xxyyzz/apogee-2016 | 2c57b3c48334e798cab560d6525567da9b2ede61 | c55f6427bbe246060aacbeb831e1519fb051a1b1 | refs/heads/master | 2021-05-16T11:55:21.525340 | 2017-09-07T19:05:00 | 2017-09-07T19:05:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 879 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Bhavan',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Room',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('room', models.CharField(max_length=50)),
('vacancy', models.IntegerField()),
('bhavan', models.ForeignKey(to='regsoft.Bhavan')),
],
),
]
| [
"[email protected]"
] | |
e3e9ffd130063e0f3807af450237e3f23ff8ab2d | 608ca6de9e2de70312dd3abc6fdf7d9aef326b53 | /feder/institutions/migrations/0010_auto_20170808_0252.py | 7c2c74204332edd5346dca93c2a0c7bf28d4b60d | [
"MIT"
] | permissive | watchdogpolska/feder | 4d3f3a5de5d2d5d266bf34dea4969f583f9f1aa0 | 57cfde4aa8680c08758ee531d69a40b0f7f1d9d7 | refs/heads/master | 2023-08-18T04:52:45.284855 | 2023-08-16T10:54:03 | 2023-08-16T10:54:03 | 40,154,083 | 18 | 12 | MIT | 2023-08-22T11:35:15 | 2015-08-04T00:10:17 | Python | UTF-8 | Python | false | false | 1,125 | py | # Generated by Django 1.11.2 on 2017-08-08 02:52
from django.db import migrations, models
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [("institutions", "0009_auto_20170708_2222")]
operations = [
migrations.AddField(
model_name="institution",
name="extra",
field=jsonfield.fields.JSONField(
default={}, verbose_name=b"Unorganized additional information"
),
preserve_default=False,
),
migrations.AddField(
model_name="institution",
name="parents",
field=models.ManyToManyField(
blank=True,
null=True,
related_name="_institution_parents_+",
to="institutions.Institution",
verbose_name="Parent institutions",
),
),
migrations.AddField(
model_name="institution",
name="regon",
field=models.CharField(
blank=True, max_length=14, verbose_name="REGON number"
),
),
]
| [
"[email protected]"
] | |
3bd9d220a3bff416185608e78413c0e27bdbfaf2 | a382716034b91d86ac7c8a548a63d236d6da8032 | /iaso/dhis2/api_logger.py | 7347f082e7d6aac87c5f6449751f5d6ad37657dc | [
"MIT"
] | permissive | lpontis/iaso | 336221335fe33ca9e07e40feb676f57bbdc749ca | 4d3a9d3faa6b3ed3a2e08c728cc4f03e5a0bbcb6 | refs/heads/main | 2023-08-12T20:34:10.823260 | 2021-10-04T07:34:50 | 2021-10-04T07:34:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,627 | py | from iaso.models.base import ExportLog
from dhis2 import RequestException
import json
class ApiLogger:
def __init__(self, api):
self.api = api
self.export_logs = []
def get(self, url, params=None):
full_url = self.api.base_url + "/" + url
try:
response = self.api.get(url, params=params)
export_log = ExportLog()
export_log.sent = params
export_log.received = response.json()
export_log.url = full_url
export_log.http_status = 200
self.export_logs.append(export_log)
except RequestException as dhis2_exception:
self.log_exception(dhis2_exception, full_url, params)
return response
def post(self, url, payload):
full_url = self.api.base_url + "/" + url
try:
response = self.api.post(url, payload)
export_log = ExportLog()
export_log.sent = payload
export_log.received = response.json()
export_log.url = full_url
export_log.http_status = 200
self.export_logs.append(export_log)
except RequestException as dhis2_exception:
self.log_exception(dhis2_exception, full_url, payload)
return response
def put(self, url, payload):
full_url = self.api.base_url + "/" + url
        try:
            # perform the request inside the try block so request failures are logged,
            # matching the behaviour of get() and post() above
            response = self.api.put(url, payload)
            export_log = ExportLog()
            export_log.sent = payload
            export_log.received = response.json()
            export_log.url = full_url
            export_log.http_status = 200
            self.export_logs.append(export_log)
        except RequestException as dhis2_exception:
            self.log_exception(dhis2_exception, full_url, payload)
return response
def pop_export_logs(self):
result = self.export_logs
self.export_logs = []
return result
def log_exception(self, dhis2_exception, full_url, params):
resp = {}
try:
resp = json.loads(dhis2_exception.description)
        except (TypeError, ValueError):
            # fall back to a plain payload when the server did not return valid JSON
            resp = {
                "status": "ERROR",
                "description": "non-JSON response returned by server",
                "raw_data": dhis2_exception.description,
            }
export_log = ExportLog()
export_log.url = full_url
export_log.sent = params
export_log.received = resp
export_log.http_code = dhis2_exception.code
self.export_logs.append(export_log)
raise dhis2_exception
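# A minimal usage sketch for ApiLogger; the server URL and credentials below are
# placeholder assumptions, not values used by this project:
#
#   from dhis2 import Api
#   api = ApiLogger(Api("https://play.dhis2.org/demo", "admin", "district"))
#   api.get("organisationUnits", params={"paging": "false"})
#   for export_log in api.pop_export_logs():
#       export_log.save()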
| [
"[email protected]"
] | |
ec98c3f0072afc3854407df67e70813898de5049 | baad457b1859f218314631c2bf10e28ab34aa0a5 | /chapter02.py | d3f57722b9729416a00f466431e597015e6902aa | [] | no_license | hankehly/data-structures-and-algorithms-in-python | d856ae6049639d3f713556e7aee91cb51443f99b | 26d2ad4852c103e5c96138de6957fa092a6b81aa | refs/heads/master | 2023-02-19T13:38:52.986727 | 2021-01-25T00:51:19 | 2021-01-25T00:51:19 | 325,148,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,526 | py | def R_2_01():
"""
3 examples of life-critical software applications are
1. remote surgery software that moves robotic arms
2. automated rocket ignition and trajectory adjustment software in space flight
3. pacemakers
"""
pass
def R_2_02():
"""
An example of a software application where adaptability makes the difference between
continued sales and bankruptcy is a data-analysis app. To be marketable to a wide
audience, it must scale to different amounts of input. If the system was written in
a way that assumes a maximum of N data-points, sales may be unable to target certain
clients, or the company may lose its existing customers to competitors with more
scalable systems.
"""
pass
def R_2_03():
"""
An example text-editor GUI may encapsulate behavior to process key-strokes, save a
text file, print a text file, or pass a text file to the python interpreter to run
as a program.
"""
pass
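    # A rough sketch of the encapsulation described above; the class and method bodies
    # are illustrative placeholders, not a real GUI implementation.
    class TextEditor:
        def __init__(self, text=""):
            self._text = text  # encapsulated document state
        def process_keystroke(self, key):
            self._text += key  # append the typed character
        def save(self, path):
            with open(path, "w") as f:
                f.write(self._text)
        def run_as_program(self):
            exec(self._text)  # hand the buffer to the Python interpreter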
def R_2_04():
class Flower:
def __init__(self, name: str = "iris", n_petals: int = 5, price: float = 0.99):
self._name = name
self._n_petals = n_petals
self._price = price
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def n_petals(self):
return self._n_petals
@n_petals.setter
def n_petals(self, value):
self._n_petals = value
@property
def price(self):
return self._price
@price.setter
def price(self, value):
if value < 0:
raise ValueError("price must be positive")
self._price = value
f = Flower()
assert f.name == "iris"
f.name = "foo"
assert f.name == "foo"
try:
f.price = -1
except ValueError:
assert True
else:
assert False
def R_2_05():
class CreditCard:
def charge(self, price):
if not isinstance(price, (int, float)):
raise TypeError("price must be numeric")
def R_2_06():
class CreditCard:
def make_payment(self, amount):
if not isinstance(amount, (int, float)):
raise TypeError("amount must be numeric")
elif amount < 0:
raise ValueError("amount cannot be negative")
def R_2_07():
class CreditCard:
def __init__(self, customer, bank, acnt, limit, balance=0):
self._customer = customer
self._bank = bank
self._account = acnt
self._limit = limit
self._balance = balance
def R_2_08():
class CreditCard:
def __init__(self, customer, bank, acnt, limit, balance=0):
self._customer = customer
self._bank = bank
self._account = acnt
self._limit = limit
self._balance = balance
@property
def bank(self):
return self._bank
@property
def balance(self):
return self._balance
def charge(self, price):
if self._balance + price > self._limit:
return False
else:
self._balance += price
return True
def run():
wallet = [
CreditCard("John Archer", "KS Savings", "1234 5678 9101 1121", 2500),
CreditCard("John Archer", "KS Federal", "4321 5678 9101 1121", 3500),
CreditCard("John Archer", "KS Finance", "2413 5678 9101 1121", 5000),
]
for i in range(1, 100):
for j, card in enumerate(wallet, start=1):
price = i * j
if not card.charge(price):
print(f"{card.bank} (i={i}, balance={card.balance}, price={price})")
return
# run()
def R_2_09():
class Vector:
def __init__(self, d):
self._coords = [0] * d
def __len__(self):
return len(self._coords)
def __getitem__(self, item):
return self._coords[item]
def __setitem__(self, key, value):
self._coords[key] = value
def __sub__(self, other):
if len(other) != len(self):
raise ValueError("dimensions must agree")
result = Vector(len(other))
for i in range(len(result)):
result[i] = self[i] - other[i]
return result
u = Vector(3)
u[0] = 3
u[1] = 3
u[2] = 3
v = Vector(3)
v[0] = 4
v[1] = 1
v[2] = 3
r = u - v
assert r[0] == -1
assert r[1] == 2
assert r[2] == 0
def R_2_10():
class Vector:
def __init__(self, d):
self._coords = [0] * d
def __len__(self):
return len(self._coords)
def __getitem__(self, item):
return self._coords[item]
def __setitem__(self, key, value):
self._coords[key] = value
def __neg__(self):
result = Vector(len(self))
for i in range(len(result)):
result[i] = -self[i]
return result
v = Vector(3)
v[0] = 1
v[1] = 0
v[2] = -1
r = -v
assert r[0] == -1
assert r[1] == 0
assert r[2] == 1
if __name__ == "__main__":
R_2_01()
R_2_02()
R_2_03()
R_2_04()
R_2_05()
R_2_06()
R_2_07()
R_2_08()
R_2_09()
R_2_10()
| [
"[email protected]"
] | |
5d45c573744b34fc2473e7e50c09225740106a27 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/77e550192cdbc1e734f5ebfcd347539d9300559b-<main>-fix.py | b577d86ced2688cfc84b23b2c720d617725534ed | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,507 | py | def main():
module = AnsibleModule(argument_spec=dict(command=dict(default='install', type='str', required=False), arguments=dict(default='', type='str', required=False), working_dir=dict(type='path', aliases=['working-dir']), global_command=dict(default=False, type='bool', aliases=['global-command']), prefer_source=dict(default=False, type='bool', aliases=['prefer-source']), prefer_dist=dict(default=False, type='bool', aliases=['prefer-dist']), no_dev=dict(default=True, type='bool', aliases=['no-dev']), no_scripts=dict(default=False, type='bool', aliases=['no-scripts']), no_plugins=dict(default=False, type='bool', aliases=['no-plugins']), optimize_autoloader=dict(default=True, type='bool', aliases=['optimize-autoloader']), ignore_platform_reqs=dict(default=False, type='bool', aliases=['ignore-platform-reqs'])), required_if=[('global_command', False, ['working_dir'])], supports_check_mode=True)
command = module.params['command']
if re.search('\\s', command):
module.fail_json(msg="Use the 'arguments' param for passing arguments with the 'command'")
arguments = module.params['arguments']
global_command = module.params['global_command']
available_options = get_available_options(module=module, command=command)
options = []
default_options = ['no-ansi', 'no-interaction', 'no-progress']
for option in default_options:
if (option in available_options):
option = ('--%s' % option)
options.append(option)
if (not global_command):
options.extend(['--working-dir', ("'%s'" % module.params['working_dir'])])
option_params = {
'prefer_source': 'prefer-source',
'prefer_dist': 'prefer-dist',
'no_dev': 'no-dev',
'no_scripts': 'no-scripts',
'no_plugins': 'no_plugins',
'optimize_autoloader': 'optimize-autoloader',
'ignore_platform_reqs': 'ignore-platform-reqs',
}
for (param, option) in option_params.items():
if (module.params.get(param) and (option in available_options)):
option = ('--%s' % option)
options.append(option)
if module.check_mode:
options.append('--dry-run')
(rc, out, err) = composer_command(module, command, arguments, options, global_command)
if (rc != 0):
output = parse_out(err)
module.fail_json(msg=output, stdout=err)
else:
output = parse_out((out + err))
module.exit_json(changed=has_changed(output), msg=output, stdout=(out + err)) | [
"[email protected]"
] | |
33b19de4abb9ea442bd9f7425e292d2f94d9f282 | 9f13c7be7e88d2486076db0b89a215ab900fea1a | /main/apps/studies/views.py | ce8693e21a63aa02a78dfaa0cd887984bc351d4d | [] | no_license | truhlik/reimpay | 534664d2e506f39ec6b0edea471e7cfbad179c3b | ac8291dc8a0ac543283ee10aecd32deba24a1641 | refs/heads/main | 2023-08-10T16:45:55.283272 | 2021-10-06T19:00:15 | 2021-10-06T19:00:15 | 414,333,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,349 | py | from django.utils.decorators import method_decorator
from django.views.generic.detail import SingleObjectMixin
from rest_framework import viewsets, mixins
from rest_framework.decorators import action
from rest_framework.exceptions import ValidationError
from rest_framework.response import Response
from main.apps.core.swagger import path_id_param_str, path_id_param_int
from main.libraries import permissions
from main.libraries.serializers.mixins import GetSerializerClassMixin
from . import models, serializers, utils
from .filters import PatientVisitItemFilter, PatientFilter
from ...libraries.permissions import is_doctor, check_patient_id_in_session, get_patient_id_list_from_session
from ...libraries.views import ApiPdfTemplateView
@method_decorator(name='retrieve', decorator=path_id_param_str)
@method_decorator(name='update', decorator=path_id_param_str)
@method_decorator(name='partial_update', decorator=path_id_param_str)
@method_decorator(name='destroy', decorator=path_id_param_str)
@method_decorator(name='config', decorator=path_id_param_str)
class StudyViewSet(GetSerializerClassMixin, viewsets.ModelViewSet):
queryset = models.Study.objects.active().prefetch_related()
permission_classes = [permissions.HasCompanyPermission, permissions.IsOwner, permissions.IsAdminOrReadOnly, permissions.StudyStatusPermission]
filterset_fields = ['id']
serializer_class = serializers.StudyReadSerializer
suffix = 'List'
serializer_action_classes = {
'list': serializers.StudyReadSerializer,
'retrieve': serializers.StudyReadSerializer,
'update': serializers.StudyWriteSerializer,
'partial_update': serializers.StudyWriteSerializer,
'create': serializers.StudyWriteSerializer,
'config': serializers.StudyConfigSerializer,
'users': serializers.UserSerializer,
}
def get_queryset(self):
qs = super(StudyViewSet, self).get_queryset().list(self.request.user).order_by('identifier')
if self.action == 'list':
qs = qs.prefetch_list()
return qs
def perform_destroy(self, instance):
instance.custom_delete()
@action(methods=['GET'], detail=True, serializer_class=serializers.StudyConfigSerializer)
def config(self, request, *args, **kwargs):
pk = kwargs.get('pk', None)
if pk == 'null':
instance = models.Study()
else:
instance = self.get_object()
serializer = self.get_serializer(instance)
return Response(serializer.data)
@property
def paginator(self):
if self.action == 'users':
return None
return super(StudyViewSet, self).paginator
@method_decorator(name='retrieve', decorator=path_id_param_int)
@method_decorator(name='update', decorator=path_id_param_int)
@method_decorator(name='partial_update', decorator=path_id_param_int)
@method_decorator(name='destroy', decorator=path_id_param_int)
class StudyItemViewSet(GetSerializerClassMixin, viewsets.ModelViewSet):
queryset = models.StudyItem.objects.all()
permission_classes = [permissions.HasCompanyPermission, permissions.IsOwner, permissions.IsAdmin,
permissions.StudyStatusPermission]
serializer_class = serializers.StudyItemSerializer
filterset_fields = ['study_id']
def get_queryset(self):
return super(StudyItemViewSet, self).get_queryset().company(self.request.user)
def perform_destroy(self, instance):
instance.custom_delete()
@method_decorator(name='retrieve', decorator=path_id_param_int)
@method_decorator(name='update', decorator=path_id_param_int)
@method_decorator(name='partial_update', decorator=path_id_param_int)
@method_decorator(name='destroy', decorator=path_id_param_int)
class ArmViewSet(GetSerializerClassMixin, viewsets.ModelViewSet):
queryset = models.Arm.objects.all()
permission_classes = [permissions.HasCompanyPermission, permissions.IsOwner, permissions.IsAdminOrReadOnly,
permissions.StudyStatusPermission]
serializer_class = serializers.ArmSerializer
filterset_fields = ['study_id']
def get_queryset(self):
return super(ArmViewSet, self).get_queryset().company(self.request.user)
def perform_destroy(self, instance):
instance.custom_delete()
@method_decorator(name='retrieve', decorator=path_id_param_int)
@method_decorator(name='update', decorator=path_id_param_int)
@method_decorator(name='partial_update', decorator=path_id_param_int)
@method_decorator(name='destroy', decorator=path_id_param_int)
class SiteViewSet(GetSerializerClassMixin, viewsets.ModelViewSet):
queryset = models.Site.objects.active()
permission_classes = [permissions.HasCompanyPermission, permissions.IsOwner, permissions.IsAdminOrReadOnly,
permissions.StudyStatusPermission]
serializer_class = serializers.SiteSerializer
serializer_action_classes = {
'retrieve': serializers.SiteSerializer,
'create': serializers.SiteSerializer,
'update': serializers.SiteSerializer,
'partial_update': serializers.SiteSerializer,
'destroy': serializers.SiteSerializer,
'list': serializers.SiteSerializer,
'patients': serializers.SitePatientSerializer,
}
filterset_fields = ['study_id', 'id']
def get_queryset(self):
return super(SiteViewSet, self).get_queryset().company(self.request.user).order_by('title')
@action(methods=['GET'], detail=False, serializer_class=serializers.SitePatientSerializer)
def patients(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
def perform_destroy(self, instance):
instance.custom_delete()
@method_decorator(name='retrieve', decorator=path_id_param_int)
@method_decorator(name='update', decorator=path_id_param_int)
@method_decorator(name='partial_update', decorator=path_id_param_int)
# @method_decorator(name='destroy', decorator=path_id_param_int)
class PatientViewSet(GetSerializerClassMixin,
mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.ListModelMixin,
viewsets.GenericViewSet):
queryset = models.Patient.objects.all().select_related('arm', 'site').prefetch_related('patient_visits')
permission_classes = [permissions.IsCraOrAdminOrDoctor,
permissions.OnlyAdminOrCraCanCreatePermission,
permissions.OnlyAdminOrCraCanDeletePermission,
permissions.OnlyAdminOrCraCanListPermission,
permissions.IsOwnerOrObjIDInSession,
permissions.StudyStatusPermission,
]
serializer_class = serializers.PatientBaseSerializer
serializer_action_classes = {
'retrieve': serializers.PatientDetailSerializer
}
# filterset_fields = ['study_id', 'site_id']
filter_class = PatientFilter
def get_queryset(self):
if self.request.user.is_authenticated:
qs = super(PatientViewSet, self).get_queryset().company(self.request.user)
elif is_doctor(self.request):
patient_id_lst = get_patient_id_list_from_session(self.request)
qs = super(PatientViewSet, self).get_queryset().filter(id__in=patient_id_lst)
else:
return super(PatientViewSet, self).get_queryset().none()
if self.action == 'retrieve':
qs = qs.prefetch_detail()
return qs
@method_decorator(name='retrieve', decorator=path_id_param_int)
@method_decorator(name='update', decorator=path_id_param_int)
@method_decorator(name='partial_update', decorator=path_id_param_int)
@method_decorator(name='destroy', decorator=path_id_param_int)
class VisitViewSet(GetSerializerClassMixin, viewsets.ModelViewSet):
queryset = models.Visit.objects.all().select_related('arm', 'study')
permission_classes = [permissions.HasCompanyPermission, permissions.IsOwner, permissions.IsAdminOrReadOnly,
permissions.StudyStatusPermission]
serializer_class = serializers.VisitSerializer
filterset_fields = ['study_id', 'arm_id']
def get_queryset(self):
return super(VisitViewSet, self).get_queryset().active().company(self.request.user).order_by('order')
def perform_destroy(self, instance):
instance.custom_delete()
@method_decorator(name='retrieve', decorator=path_id_param_int)
@method_decorator(name='update', decorator=path_id_param_int)
@method_decorator(name='partial_update', decorator=path_id_param_int)
@method_decorator(name='destroy', decorator=path_id_param_int)
class VisitItemViewSet(GetSerializerClassMixin, viewsets.ModelViewSet):
queryset = models.VisitItem.objects.all().active()
permission_classes = [permissions.HasCompanyPermission, permissions.IsOwner, permissions.IsAdminOrReadOnly,
permissions.StudyStatusPermission]
serializer_class = serializers.VisitItemSerializer
filterset_fields = ['visit_id']
def get_queryset(self):
return super(VisitItemViewSet, self).get_queryset().active().company(self.request.user)
def perform_destroy(self, instance):
instance.custom_delete()
@method_decorator(name='destroy', decorator=path_id_param_int)
class PatientVisitViewSet(GetSerializerClassMixin,
mixins.CreateModelMixin,
mixins.ListModelMixin,
mixins.DestroyModelMixin,
viewsets.GenericViewSet):
queryset = models.PatientVisit.objects.all()
permission_classes = [permissions.IsCraOrAdminOrDoctor,
permissions.OnlyAdminOrCraCanDeletePermission,
permissions.OnlyAdminOrCraCanUpdatePermission,
permissions.StudyStatusPermission,
]
serializer_class = serializers.PatientVisitSerializer
filterset_fields = ['patient_id']
def get_queryset(self):
if self.request.user.is_authenticated:
return super(PatientVisitViewSet, self).get_queryset().company(self.request.user)
elif is_doctor(self.request):
patient_id = self.request.GET.get('patient_id', None)
if not check_patient_id_in_session(self.request, patient_id):
return super(PatientVisitViewSet, self).get_queryset().none()
return super(PatientVisitViewSet, self).get_queryset().filter(patient_id=patient_id)
return super(PatientVisitViewSet, self).get_queryset().none()
def perform_destroy(self, instance):
if not utils.can_delete_patient_visit(instance):
raise ValidationError("You are not allowed to perform this action.")
instance.delete()
@method_decorator(name='update', decorator=path_id_param_int)
@method_decorator(name='partial_update', decorator=path_id_param_int)
class PatientVisitItemViewSet(GetSerializerClassMixin,
mixins.CreateModelMixin,
mixins.UpdateModelMixin,
mixins.ListModelMixin,
viewsets.GenericViewSet):
queryset = models.PatientVisitItem.objects.all().select_related('patient_visit__patient', 'visit_item')
permission_classes = [permissions.IsCraOrAdminOrDoctor,
permissions.OnlyAdminOrCraCanCreatePermission,
permissions.OnlyAdminOrCraCanDeletePermission,
permissions.OnlyAdminOrCraCanUpdatePermission,
permissions.IsOwner,
permissions.StudyStatusPermission,
]
serializer_class = serializers.PatientVisitItemSerializer
filter_class = PatientVisitItemFilter
def get_queryset(self):
if self.request.user.is_authenticated:
return super(PatientVisitItemViewSet, self).get_queryset().owner(self.request.user)
elif is_doctor(self.request):
patient_id = self.request.GET.get('patient_id', None)
if not check_patient_id_in_session(self.request, patient_id):
return super(PatientVisitItemViewSet, self).get_queryset().none()
return super(PatientVisitItemViewSet, self).get_queryset().filter(patient_id=patient_id)
return super(PatientVisitItemViewSet, self).get_queryset().none()
class SitesInstructionPDFView(SingleObjectMixin, ApiPdfTemplateView):
permission_classes = [permissions.IsCraOrAdmin]
filename = 'site_instruction.pdf'
template_name = 'sites/instrukce.html'
cmd_options = {
'margin-top': 3,
}
def get_queryset(self):
if self.request.user.is_anonymous:
return models.Site.objects.none()
return models.Site.objects.owner(self.request.user)
def get(self, request, *args, **kwargs):
self.object = self.get_object()
return super(SitesInstructionPDFView, self).get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super(SitesInstructionPDFView, self).get_context_data(**kwargs)
ctx.update({
'site': self.object,
'study': self.object.study,
})
return ctx
class SitesPatientFormPDFView(SingleObjectMixin, ApiPdfTemplateView):
permission_classes = [permissions.IsCraOrAdmin]
filename = 'patient_form.pdf'
template_name = 'sites/souhlas.html'
cmd_options = {
'margin-top': 20,
}
def get_queryset(self):
if self.request.user.is_anonymous:
return models.Site.objects.none()
return models.Site.objects.owner(self.request.user)
def get(self, request, *args, **kwargs):
self.object = self.get_object()
return super(SitesPatientFormPDFView, self).get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super(SitesPatientFormPDFView, self).get_context_data(**kwargs)
ctx.update({
'site': self.object,
'study': self.object.study,
'sponsor_name': self.object.study.get_sponsor_name(),
'cro': self.object.study.is_cro_operator(),
})
return ctx
| [
"[email protected]"
] | |
f8c0c13aa9f2fb23aa4164a1148faad3a90c1454 | 0bf93a74ce5676e978f3ee79a98a1be90b0e20a5 | /nagios/check_metar_station.py | 7ccf2c55c95b5dbf22239356bc0d162e432bc650 | [
"MIT"
] | permissive | claudemp/iem | 3c926361c55fde3265157e15bc5119d64dbf2418 | 557deb8c46342aa9a18ac56cba59345c072cf225 | refs/heads/master | 2021-04-06T13:39:08.352676 | 2018-03-12T20:58:14 | 2018-03-12T20:58:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,960 | py | """Ensure that a station is getting ingested properly
python check_metar_station.py <network> <id> <minute_of_synop>
"""
from __future__ import print_function
import sys
import datetime
from pyiem.util import get_dbconn
def check(network, station, minute):
"""Do the check"""
# Do IEMaccess
res = {'rt_temp': 'M', 'arch_temp': 'M'}
now = datetime.datetime.now() - datetime.timedelta(minutes=75)
res['rt_valid'] = now.replace(minute=minute, second=0, microsecond=0)
pgconn = get_dbconn('iem', user='nobody')
icursor = pgconn.cursor()
icursor.execute("""
SELECT tmpf from current_log c JOIN stations s on
(s.iemid = c.iemid)
WHERE id = %s and network = %s and valid = %s
""", (station, network, res['rt_valid']))
if icursor.rowcount > 0:
row = icursor.fetchone()
res['rt_temp'] = int(row[0])
# Do ASOS
now = datetime.datetime.now() - datetime.timedelta(minutes=135)
res['arch_valid'] = now.replace(minute=minute, second=0, microsecond=0)
pgconn = get_dbconn('asos', user='nobody')
icursor = pgconn.cursor()
icursor.execute("""
SELECT tmpf from alldata where station = %s and valid = %s
""", (station, res['arch_valid']))
if icursor.rowcount > 0:
row = icursor.fetchone()
res['arch_temp'] = int(row[0])
return res
def main(argv):
"""Go Main"""
network = argv[1]
station = argv[2]
minute = int(argv[3])
res = check(network, station, minute)
msg = ('OK' if (res['rt_temp'] != 'M' and res['arch_temp'] != 'M')
else 'CRITICAL')
print(('%s - RT:%s(%s) ARCH:%s(%s) |rttemp=%s;;; archtemp=%s;;;'
) % (msg, res['rt_valid'].strftime("%d%H%M"), res['rt_temp'],
res['arch_valid'].strftime("%d%H%M"), res['arch_temp'],
res['rt_temp'], res['arch_temp']))
if msg == 'OK':
sys.exit(0)
sys.exit(2)
if __name__ == '__main__':
main(sys.argv)
| [
"[email protected]"
] | |
ee9d6a11a76b3a0bd9f2a0d0e67a2943b1c885b2 | 6e038efd962e034aaffc38688c949261b2319818 | /venv/lib/python3.7/ntpath.py | 50dcef194d6176fb51ed00dcdd16316ca39d1ca4 | [] | no_license | aliensmart/Django_message_board | 88bc4ebbaf840440d39a5f099590bd78ef392c73 | f6ae0be4be9c44fd6262e53221cbfbc94e74a402 | refs/heads/master | 2022-12-29T04:07:42.985630 | 2020-10-14T04:59:37 | 2020-10-14T04:59:37 | 303,907,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50 | py | /home/alienmoore/anaconda3/lib/python3.7/ntpath.py | [
"[email protected]"
] | |
77e3caad1c173557813c2f8d8ea67470f4576f4e | 2553e81ac1927f9e3e4a18da60d6e9b9cc23affc | /Script2.py | cf98933e6342ec6fd17b8bf958969ac44e45f7e8 | [] | no_license | Sapphirine/ISP-Dataset | 50bbc545c7c62cc39cefc25ed9c53f356e7efd53 | c93190d182b56a3c3c574105d4aff0faefc1f065 | refs/heads/master | 2021-08-31T16:41:33.235009 | 2017-12-22T03:47:21 | 2017-12-22T03:47:21 | 115,070,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 678 | py | %matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import re
plt.style.use('ggplot')
chosen_indicators = ['IE.PPI.TELE.CD', 'IT.CEL.SETS', 'SIT.CEL.SETS.p2', 'IT.NET.USER.ZS', \
'IT.NET.BBND', 'IT.NET.BBND.p2' ]
df_s = df[df['IndicatorCode'].isin(chosen_indicators)]
df_om = df_subset[df['CountryName']=="Oman"]
def plot_indicator(indicator,delta=10):
ds_f = df_s[['IndicatorName','Year','Value']][df_s['IndicatorCode']==indicator]
try:
title = df_s['IndicatorName'].iloc[0]
except:
title = "None"
x1 = df_s['Year'].values
plt.figure(figsize=(20,5))
| [
"[email protected]"
] | |
1eb57e1d2984843d70cbb76c61433b9f643de98e | 72cce39540db76c1915192470ca83a724aed2186 | /error_reporting/google/cloud/errorreporting_v1beta1/gapic/error_group_service_client.py | d82d1d1a6a0be9e49291008de03fc2991ec2e802 | [
"Apache-2.0"
] | permissive | cclauss/google-cloud-python | 5b7731c5f4f1d545af6906f86d848be18d6d4dfc | 90f3d0b5304fd8aef9cedc35d9323d257b5a36fb | refs/heads/master | 2020-03-27T14:04:35.677857 | 2018-08-29T17:07:42 | 2018-08-29T17:07:42 | 146,642,760 | 0 | 0 | Apache-2.0 | 2018-08-29T18:36:12 | 2018-08-29T18:36:11 | null | UTF-8 | Python | false | false | 10,258 | py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.devtools.clouderrorreporting.v1beta1 ErrorGroupService API."""
import pkg_resources
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import google.api_core.path_template
from google.cloud.errorreporting_v1beta1.gapic import enums
from google.cloud.errorreporting_v1beta1.gapic import error_group_service_client_config
from google.cloud.errorreporting_v1beta1.proto import common_pb2
from google.cloud.errorreporting_v1beta1.proto import error_group_service_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
'google-cloud-error-reporting', ).version
class ErrorGroupServiceClient(object):
"""Service for retrieving and updating individual error groups."""
SERVICE_ADDRESS = 'clouderrorreporting.googleapis.com:443'
"""The default address of the service."""
# The scopes needed to make gRPC calls to all of the methods defined in
# this service
_DEFAULT_SCOPES = ('https://www.googleapis.com/auth/cloud-platform', )
# The name of the interface for this client. This is the key used to find
# method configuration in the client_config dictionary.
_INTERFACE_NAME = 'google.devtools.clouderrorreporting.v1beta1.ErrorGroupService'
@classmethod
def group_path(cls, project, group):
"""Return a fully-qualified group string."""
return google.api_core.path_template.expand(
'projects/{project}/groups/{group}',
project=project,
group=group,
)
def __init__(self,
channel=None,
credentials=None,
client_config=error_group_service_client_config.config,
client_info=None):
"""Constructor.
Args:
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_config (dict): A dictionary of call options for each
method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# If both `channel` and `credentials` are specified, raise an
# exception (channels come with credentials baked in already).
if channel is not None and credentials is not None:
raise ValueError(
'The `channel` and `credentials` arguments to {} are mutually '
'exclusive.'.format(self.__class__.__name__), )
# Create the channel.
if channel is None:
channel = google.api_core.grpc_helpers.create_channel(
self.SERVICE_ADDRESS,
credentials=credentials,
scopes=self._DEFAULT_SCOPES,
)
# Create the gRPC stubs.
self.error_group_service_stub = (
error_group_service_pb2.ErrorGroupServiceStub(channel))
if client_info is None:
client_info = (
google.api_core.gapic_v1.client_info.DEFAULT_CLIENT_INFO)
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config['interfaces'][self._INTERFACE_NAME], )
# Write the "inner API call" methods to the class.
# These are wrapped versions of the gRPC stub methods, with retry and
# timeout configuration applied, called by the public methods on
# this class.
self._get_group = google.api_core.gapic_v1.method.wrap_method(
self.error_group_service_stub.GetGroup,
default_retry=method_configs['GetGroup'].retry,
default_timeout=method_configs['GetGroup'].timeout,
client_info=client_info,
)
self._update_group = google.api_core.gapic_v1.method.wrap_method(
self.error_group_service_stub.UpdateGroup,
default_retry=method_configs['UpdateGroup'].retry,
default_timeout=method_configs['UpdateGroup'].timeout,
client_info=client_info,
)
# Service calls
def get_group(self,
group_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Get the specified group.
Example:
>>> from google.cloud import errorreporting_v1beta1
>>>
>>> client = errorreporting_v1beta1.ErrorGroupServiceClient()
>>>
>>> group_name = client.group_path('[PROJECT]', '[GROUP]')
>>>
>>> response = client.get_group(group_name)
Args:
group_name (str): [Required] The group resource name. Written as
<code>projects/<var>projectID</var>/groups/<var>group_name</var></code>.
Call
<a href=\"/error-reporting/reference/rest/v1beta1/projects.groupStats/list\">
<code>groupStats.list</code></a> to return a list of groups belonging to
this project.
Example: <code>projects/my-project-123/groups/my-group</code>
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.errorreporting_v1beta1.types.ErrorGroup` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
if metadata is None:
metadata = []
metadata = list(metadata)
request = error_group_service_pb2.GetGroupRequest(
group_name=group_name, )
return self._get_group(
request, retry=retry, timeout=timeout, metadata=metadata)
def update_group(self,
group,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Replace the data for the specified group.
Fails if the group does not exist.
Example:
>>> from google.cloud import errorreporting_v1beta1
>>>
>>> client = errorreporting_v1beta1.ErrorGroupServiceClient()
>>>
>>> group = {}
>>>
>>> response = client.update_group(group)
Args:
group (Union[dict, ~google.cloud.errorreporting_v1beta1.types.ErrorGroup]): [Required] The group which replaces the resource on the server.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.errorreporting_v1beta1.types.ErrorGroup`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.errorreporting_v1beta1.types.ErrorGroup` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
if metadata is None:
metadata = []
metadata = list(metadata)
request = error_group_service_pb2.UpdateGroupRequest(group=group, )
return self._update_group(
request, retry=retry, timeout=timeout, metadata=metadata)
| [
"[email protected]"
] | |
83e68fc46fb7aa2a009ca2fa890446accaf1b950 | 55a6a35f7720089f8c7dcc083e3600cfbca0e6a2 | /setup.py | 3f401546aefe0ab2c9692fd02547d4ec9866e8f6 | [
"MIT"
] | permissive | SirMathhman/context_menu | 8d878aa94c23f225b37e6f89e00cd31cec62d65f | bb619d06e15798de8ceb0ddd252d7ae26a492947 | refs/heads/master | 2022-09-04T01:03:28.354901 | 2020-05-31T17:11:01 | 2020-05-31T17:11:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 849 | py | import pathlib
import setuptools
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
# This call to setup() does all the work
setuptools.setup(
name="context_menu",
version="1.0.0",
description="Library to create cross-platform native context menus.",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/saleguas/context_menu",
author="Salvador Aleguas",
author_email="[email protected]",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
packages=setuptools.find_packages(),
)
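# A typical build-and-publish sequence for this package (tooling assumed, not part of
# this file):
#
#   python setup.py sdist bdist_wheel
#   twine upload dist/*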
| [
"[email protected]"
] | |
37ee79c2c93d9f8ed6baae1d8a818870133f718d | 596e92d0d484b6e7eee6d322e72e52748fdeaa5d | /test/test_nhl_stats_standing.py | 33a21fdb377d2716a2c8b6e6677a31bbaea3972a | [] | no_license | scottypate/sportsdata | f5f61ddc7eb482883f93737c6ce73dd814ed4336 | a07955ab50bf4fff1ce114ed9895095ff770c473 | refs/heads/main | 2023-08-18T16:51:56.452678 | 2021-10-22T12:44:08 | 2021-10-22T12:44:08 | 420,062,350 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 973 | py | # coding: utf-8
"""
NHL v3 Stats
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import sportsdata.nhl_stats
from sportsdata.nhl_stats.models.nhl_stats_standing import NhlStatsStanding # noqa: E501
from sportsdata.nhl_stats.rest import ApiException
class TestNhlStatsStanding(unittest.TestCase):
"""NhlStatsStanding unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testNhlStatsStanding(self):
"""Test NhlStatsStanding"""
# FIXME: construct object with mandatory attributes with example values
# model = sportsdata.nhl_stats.models.nhl_stats_standing.NhlStatsStanding() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
6ea1daf688c5dea9387fb2ba10c5fbdfb8ce9008 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_QC2102.py | 96caeab00be7aac3bbb16672f84d220d9143ee46 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,224 | py | # qubit number=4
# total number=33
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=20
prog.cz(input_qubit[0],input_qubit[3]) # number=21
prog.h(input_qubit[3]) # number=22
prog.x(input_qubit[3]) # number=13
prog.h(input_qubit[3]) # number=23
prog.cz(input_qubit[0],input_qubit[3]) # number=24
prog.h(input_qubit[3]) # number=25
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
prog.y(input_qubit[2]) # number=18
prog.z(input_qubit[3]) # number=28
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.cx(input_qubit[2],input_qubit[0]) # number=10
prog.h(input_qubit[1]) # number=19
prog.h(input_qubit[0]) # number=15
prog.cz(input_qubit[2],input_qubit[0]) # number=16
prog.h(input_qubit[0]) # number=17
prog.y(input_qubit[1]) # number=26
prog.y(input_qubit[1]) # number=27
prog.swap(input_qubit[1],input_qubit[0]) # number=29
prog.swap(input_qubit[1],input_qubit[0]) # number=30
prog.x(input_qubit[1]) # number=31
prog.x(input_qubit[1]) # number=32
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_QC2102.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| [
"[email protected]"
] | |
4fa76e84426de96dcf88970cf3e131fedae3b972 | a0d8e69fb80c5bcf7bdfd2c599f83fe71b04f48f | /lab4/mysite/settings.py | 28d0892856efc88363d768d3008dc78678b2ecaa | [] | no_license | DawidPawlowski123/aplikacje-internetowe-Dawid-Pawlowski-185ic | abb0d177ee5919294ee76af651c1c615053b1347 | 5e40ed6f0ff7615405d7ec38a649a2bd7baaff97 | refs/heads/main | 2023-02-25T08:39:28.672531 | 2021-01-29T14:43:00 | 2021-01-29T14:43:00 | 312,213,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,824 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '69wd+sx_)dzst15rojvt-f$16r=3x0x8ql=lv8%(mtbv*!_c9l'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'post',
'rest_framework',
'rest_framework.authtoken',
'allauth',
'allauth.account',
'allauth.socialaccount',
'rest_auth',
'rest_auth.registration',
# Swagger
'drf_yasg',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
#'rest_framework.permissions.AllowAny',
'rest_framework.permissions.IsAuthenticated',
],
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.SessionAuthentication',
# 'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.TokenAuthentication',
],
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
SITE_ID = 1 | [
"[email protected]"
] | |
d59bbb75712262a49cc6527e34bb5d872fe48b59 | 712085c99797a14c2c5cff0e46a3de33b307d5dd | /eveuniverse/management/commands/eveuniverse_purge_data.py | b312e93dcd044b42f0c88a8e0a6ce4b1ff6c30df | [
"MIT"
] | permissive | staropera/django-eveuniverse | eeb86a8086fb83e853d0ae5562a97d6969a48039 | 4dd875238d475c5dd0355283b6257e3bcbad2d8b | refs/heads/master | 2023-07-05T05:53:11.295782 | 2021-07-03T13:21:31 | 2021-07-03T13:21:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,644 | py | import logging
from django.core.management.base import BaseCommand
from django.db import transaction
from ... import __title__
from ...models import EveUniverseBaseModel
from ...utils import LoggerAddTag
from . import get_input
logger = LoggerAddTag(logging.getLogger(__name__), __title__)
class Command(BaseCommand):
help = (
"Removes all app-related data from the database. "
"Run this command before zero migrations, "
"which would otherwise fail due to FK constraints."
)
def _purge_all_data(self):
"""updates all SDE models from ESI and provides progress output"""
with transaction.atomic():
for MyModel in EveUniverseBaseModel.all_models():
self.stdout.write(
"Deleting {:,} objects from {}".format(
MyModel.objects.count(),
MyModel.__name__,
)
)
MyModel.objects.all().delete()
def handle(self, *args, **options):
self.stdout.write(
"This command will delete all app related data in the database. "
"This can not be undone. Note that this can disrupt other apps "
"that relate to this data. Use with caution."
)
user_input = get_input("Are you sure you want to proceed? (y/N)?")
if user_input.lower() == "y":
self.stdout.write("Starting data purge. Please stand by.")
self._purge_all_data()
self.stdout.write(self.style.SUCCESS("Purge complete!"))
else:
self.stdout.write(self.style.WARNING("Aborted"))
| [
"[email protected]"
] | |
7556a5b4a26081f1b04ac88b64486ede82e59291 | 01175abc7d44d788a322ad2c6cdc7ea5daee4165 | /vistrails_current/vistrails/db/versions/v1_0_2/translate/v1_0_3.py | d4d7a8c2953be830c8a9c81ded135ef7eec8b753 | [
"BSD-3-Clause"
] | permissive | lumig242/VisTrailsRecommendation | 917c53fbd940b3075e629d15ed41725c01db6ce3 | 23ef56ec24b85c82416e1437a08381635328abe5 | refs/heads/master | 2020-04-11T01:20:50.193303 | 2016-01-20T23:30:10 | 2016-01-20T23:30:10 | 50,065,923 | 3 | 0 | null | 2016-01-20T22:49:17 | 2016-01-20T22:49:17 | null | UTF-8 | Python | false | false | 9,080 | py | ###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from vistrails.db.versions.v1_0_2.domain import DBVistrail, DBAnnotation, \
DBWorkflow, DBLog, DBRegistry, \
DBPortSpec, DBAdd, DBChange, DBDelete
from vistrails.core import debug
from vistrails.core.system import get_elementtree_library
ElementTree = get_elementtree_library()
import unittest
id_scope = None
def update_portSpec(old_obj, translate_dict):
global id_scope
sigs = []
defaults = []
labels = []
for psi in sorted(old_obj.db_portSpecItems, key=lambda x: x.db_pos):
sigs.append((psi.db_package, psi.db_module, psi.db_namespace))
defaults.append(psi.db_default)
labels.append(psi.db_label)
new_obj = DBPortSpec.update_version(old_obj, translate_dict)
sigstring = '(' + ','.join('%s:%s%s' %
(s[0], s[1], ":%s" % s[2] if s[2] else "")
for s in sigs) + ')'
new_obj.db_sigstring = sigstring
if all(not d for d in defaults):
new_obj.db_defaults = None
else:
new_obj.db_defaults = unicode(defaults)
if all(not label for label in labels):
new_obj.db_labels = None
else:
new_obj.db_labels = unicode(labels)
return new_obj
def update_portSpecs(old_obj, translate_dict):
new_port_specs = []
for port_spec in old_obj.db_portSpecs:
new_port_specs.append(update_portSpec(port_spec, translate_dict))
return new_port_specs
def update_portSpec_op(old_obj, translate_dict):
return update_portSpec(old_obj.db_data, translate_dict)
def translateVistrail(_vistrail):
""" Translate new DBVistrailVariable based vistrail variables to old
annotation based type """
global id_scope
def update_workflow(old_obj, trans_dict):
return DBWorkflow.update_version(old_obj.db_workflow,
trans_dict, DBWorkflow())
def update_operations(old_obj, trans_dict):
new_ops = []
for obj in old_obj.db_operations:
if obj.vtType == 'delete':
new_ops.append(DBDelete.update_version(obj, trans_dict))
elif obj.vtType == 'add':
if obj.db_what == 'portSpec':
trans_dict['DBAdd'] = {'data': update_portSpec_op}
new_op = DBAdd.update_version(obj, trans_dict)
new_ops.append(new_op)
del trans_dict['DBAdd']
else:
new_op = DBAdd.update_version(obj, trans_dict)
new_ops.append(new_op)
elif obj.vtType == 'change':
if obj.db_what == 'portSpec':
trans_dict['DBChange'] = {'data': update_portSpec_op}
new_op = DBChange.update_version(obj, trans_dict)
new_ops.append(new_op)
del trans_dict['DBChange']
else:
new_op = DBChange.update_version(obj, trans_dict)
new_ops.append(new_op)
return new_ops
vistrail = DBVistrail()
id_scope = vistrail.idScope
def update_annotations(old_obj, trans_dict):
new_annotations = []
for a in old_obj.db_annotations:
new_annotations.append(DBAnnotation.update_version(a,
translate_dict))
id_scope.updateBeginId(DBAnnotation.vtType, a.db_id)
vars = {}
for var in old_obj.db_vistrailVariables:
descriptor = (var.db_package, var.db_module, var.db_namespace)
vars[var.db_name] = (var.db_uuid, descriptor, var.db_value)
if vars:
new_id = id_scope.getNewId(DBAnnotation.vtType)
annotation = DBAnnotation(id=new_id, key='__vistrail_vars__',
value=str(vars))
new_annotations.append(annotation)
return new_annotations
translate_dict = {'DBModule': {'portSpecs': update_portSpecs},
'DBModuleDescriptor': {'portSpecs': update_portSpecs},
'DBAction': {'operations': update_operations},
'DBGroup': {'workflow': update_workflow},
'DBVistrail': {'annotations': update_annotations},
}
vistrail = DBVistrail.update_version(_vistrail, translate_dict, vistrail)
if _vistrail.db_parameter_explorations:
debug.warning(("Vistrail contains %s parameter explorations that "
"cannot be converted") % len(_vistrail.db_parameter_explorations))
vistrail.db_version = '1.0.2'
return vistrail
def translateWorkflow(_workflow):
def update_workflow(old_obj, translate_dict):
return DBWorkflow.update_version(old_obj.db_workflow, translate_dict)
translate_dict = {'DBModule': {'portSpecs': update_portSpecs},
'DBGroup': {'workflow': update_workflow}}
workflow = DBWorkflow.update_version(_workflow, translate_dict)
workflow.db_version = '1.0.2'
return workflow
def translateLog(_log):
translate_dict = {}
log = DBLog.update_version(_log, translate_dict)
log.db_version = '1.0.2'
return log
def translateRegistry(_registry):
global id_scope
translate_dict = {'DBModuleDescriptor': {'portSpecs': update_portSpecs}}
registry = DBRegistry()
id_scope = registry.idScope
vistrail = DBRegistry.update_version(_registry, translate_dict, registry)
registry.db_version = '1.0.2'
return registry
class TestTranslate(unittest.TestCase):
def testParamexp(self):
"""test translating parameter explorations from 1.0.3 to 1.0.2"""
from vistrails.db.services.io import open_bundle_from_zip_xml
from vistrails.core.system import vistrails_root_directory
import os
(save_bundle, vt_save_dir) = open_bundle_from_zip_xml(DBVistrail.vtType, \
os.path.join(vistrails_root_directory(),
'tests/resources/paramexp-1.0.3.vt'))
vistrail = translateVistrail(save_bundle.vistrail)
# paramexps cannot be downgraded but should produce a warning
def testVistrailvars(self):
"""test translating vistrail variables from 1.0.3 to 1.0.2"""
from vistrails.db.services.io import open_bundle_from_zip_xml
from vistrails.core.system import vistrails_root_directory
import os
(save_bundle, vt_save_dir) = open_bundle_from_zip_xml(DBVistrail.vtType, \
os.path.join(vistrails_root_directory(),
'tests/resources/visvar-1.0.3.vt'))
vistrail = translateVistrail(save_bundle.vistrail)
visvars = vistrail.db_annotations_key_index['__vistrail_vars__']
self.assertTrue(visvars.db_value)
if __name__ == '__main__':
from vistrails.gui.application import start_application
v = start_application({'interactiveMode': False,
'nologger': True,
'singleInstance': False,
'fixedSpreadsheetCells': True})
unittest.main()
| [
"[email protected]"
] | |
e4ba4b294ec8c74cad7bd39a9a780e9bb14e7b7b | 06a7dc7cc93d019e4a9cbcf672b23a0bbacf8e8b | /2016_schizConnect/unsupervised analysis/NMorphCH/pcatv_NMoprhCH_scz.py | f1f9355da4350f7f69e3cf1ebc2aaae8b3e4eec8 | [] | no_license | neurospin/scripts | 6c06cd218a5f32de9c3c2b7d1d8bda3f3d107458 | f14a2c9cf2cd7f5fbea767b017c3faf36d170bdb | refs/heads/master | 2021-07-11T22:55:46.567791 | 2021-07-02T13:08:02 | 2021-07-02T13:08:02 | 10,549,286 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 14,058 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 27 18:11:55 2017
@author: ad247405
"""
import os
import json
import time
import shutil
from itertools import product
from collections import OrderedDict
import numpy as np
import scipy
import pandas as pd
import sklearn.decomposition
from sklearn.cross_validation import StratifiedKFold
import nibabel
import parsimony.functions.nesterov.tv
import pca_tv
import metrics
from brainomics import array_utils
import brainomics.cluster_gabriel as clust_utils
import nibabel as nib
import parsimony.functions.nesterov.tv as tv_helper
################
# Input & output #
##################
INPUT_BASE_DIR ='/neurospin/brainomics/2016_schizConnect/analysis/NMorphCH/Freesurfer'
INPUT_MASK = '/neurospin/brainomics/2016_schizConnect/analysis/NMorphCH/Freesurfer/data/mask.npy'
INPUT_DATA_X = '/neurospin/brainomics/2016_schizConnect/analysis/NMorphCH/Freesurfer/data/X_scz_only.npy'
OUTPUT_DIR = '/neurospin/brainomics/2016_schizConnect/analysis/NMorphCH/Freesurfer/results/pcatv_scz'
#############################################################################
##############
# Parameters #
##############
# Parameters for the function create_config
# Note that value at index 1 will be the name of the task on the cluster
CONFIGS = [[5,"FS_pcatv_NMoprhCH_scz", "config.json", True]]
N_COMP = 10
PARAMS= [('struct_pca', 0.1, 0.5, 0.5),('struct_pca', 0.1, 0.5, 0.8),\
('struct_pca', 0.1, 0.5, 0.1),('struct_pca', 0.1, 0.1, 0.8),\
('struct_pca', 0.1, 0.1, 0.1),('struct_pca', 0.1, 0.1, 0.5),\
('struct_pca', 0.1, 0.8, 0.1),('struct_pca', 0.1, 0.8, 0.5),\
('struct_pca', 0.1, 0.8, 0.8),("pca", 0.0, 0.0, 0.0)]
#
#
#############
# Functions #
#############
def load_globals(config):
import mapreduce as GLOBAL # access to global variables
GLOBAL.DATA = GLOBAL.load_data(config["data"])
STRUCTURE = np.load(config["structure"])
A = tv_helper.A_from_mask(STRUCTURE)
GLOBAL.Atv = A
GLOBAL.FULL_RESAMPLE = config['full_resample']
def resample(config, resample_nb):
import mapreduce as GLOBAL # access to global variables
GLOBAL.DATA = GLOBAL.load_data(config["data"])
resample = config["resample"][resample_nb]
if resample is not None:
GLOBAL.DATA_RESAMPLED = {k: [GLOBAL.DATA[k][idx, ...] for idx in resample]
for k in GLOBAL.DATA}
else: # resample is None train == test
GLOBAL.DATA_RESAMPLED = {k: [GLOBAL.DATA[k] for idx in [0, 1]]
for k in GLOBAL.DATA}
def mapper(key, output_collector):
import mapreduce as GLOBAL # access to global variables:
model_name, global_pen, tv_ratio, l1_ratio = key
if model_name == 'pca':
global_pen = tv_ratio = l1_ratio = 0
if model_name == 'sparse_pca':
global_pen = tv_ratio = 0
ll1=l1_ratio
if model_name == 'struct_pca':
ltv = global_pen * tv_ratio
ll1 = l1_ratio * global_pen * (1 - tv_ratio)
ll2 = (1 - l1_ratio) * global_pen * (1 - tv_ratio)
assert(np.allclose(ll1 + ll2 + ltv, global_pen))
penalty_start = 3
X_train = GLOBAL.DATA_RESAMPLED["X"][0][:,penalty_start:]
print (X_train.shape)
n, p = X_train.shape
X_test = GLOBAL.DATA_RESAMPLED["X"][1][:,penalty_start:]
Atv = GLOBAL.Atv
# Fit model
if model_name == 'pca':
model = sklearn.decomposition.PCA(n_components=N_COMP)
if model_name == 'sparse_pca':
model = sklearn.decomposition.SparsePCA(n_components=N_COMP,alpha = ll1)
if model_name == 'struct_pca':
model = pca_tv.PCA_L1_L2_TV(n_components=N_COMP,
l1=ll1, l2=ll2, ltv=ltv,
Atv=Atv,
criterion="frobenius",
eps=1e-6,
max_iter=100,
inner_max_iter=int(1e4),
output=False)
model.fit(X_train)
# Save the projectors
if (model_name == 'pca') or (model_name == 'sparse_pca'):
V = model.components_.T
if model_name == 'struct_pca' :
V = model.V
# Project train & test data
if (model_name == 'pca')or (model_name == 'sparse_pca'):
X_train_transform = model.transform(X_train)
X_test_transform = model.transform(X_test)
if (model_name == 'struct_pca'):
X_train_transform, _ = model.transform(X_train)
X_test_transform, _ = model.transform(X_test)
# Reconstruct train & test data
# For SparsePCA or PCA, the formula is: UV^t (U is given by transform)
# For StructPCA this is implemented in the predict method (which uses
# transform)
if (model_name == 'pca') or (model_name == 'sparse_pca'):
X_train_predict = np.dot(X_train_transform, V.T)
X_test_predict = np.dot(X_test_transform, V.T)
if (model_name == 'struct_pca') :
X_train_predict = model.predict(X_train)
X_test_predict = model.predict(X_test)
# Compute Frobenius norm between original and recontructed datasets
frobenius_train = np.linalg.norm(X_train - X_train_predict, 'fro')
frobenius_test = np.linalg.norm(X_test - X_test_predict, 'fro')
print(frobenius_test)
# Compute explained variance ratio
evr_train = metrics.adjusted_explained_variance(X_train_transform)
evr_train /= np.var(X_train, axis=0).sum()
evr_test = metrics.adjusted_explained_variance(X_test_transform)
evr_test /= np.var(X_test, axis=0).sum()
# Remove predicted values (they are huge)
del X_train_predict, X_test_predict
ret = dict(frobenius_train=frobenius_train,
frobenius_test=frobenius_test,
components=V,
X_train_transform=X_train_transform,
X_test_transform=X_test_transform,
evr_train=evr_train,
evr_test=evr_test)
output_collector.collect(key, ret)
def reducer(key, values):
output_collectors = values
global N_COMP
import mapreduce as GLOBAL
N_FOLDS = GLOBAL.N_FOLDS
components=np.zeros((67665,N_COMP,5))
frobenius_train = np.zeros((N_COMP,5))
frobenius_test = np.zeros((1,5))
evr_train = np.zeros((N_COMP,5))
evr_test = np.zeros((N_COMP,5))
l0 = np.zeros((N_COMP,5))
l1 = np.zeros((N_COMP,5))
l2 =np.zeros((N_COMP,5))
tv = np.zeros((N_COMP,5))
# N_FOLDS is the number of true folds (not the number of resamplings)
# key : string of intermediary key
# load return dict corresponding to mapper ouput. they need to be loaded.]
# Avoid taking into account the fold 0
for item in output_collectors:
if item !=N_FOLDS:
values = output_collectors[item+1].load()
components[:,:,item] = values["components"]
frobenius_train[:,item] = values["frobenius_train"]
frobenius_test[:,item] = values["frobenius_test"]
# l0[:,item] = values["l0"]
# l1[:,item] = values["l1"]
# l2[:,item] = values["l2"]
# tv[:,item] = values["tv"]
# evr_train[:,item] = values["evr_train"]
# evr_test[:,item] = values["evr_test"]
# times[:,item] = values["time"]
#Solve non-identifiability problem (baseline = first fold)
for i in range(1,5):
if np.abs(np.corrcoef(components[:,0,0],components[:,0,i])[0,1]) < np.abs(np.corrcoef(components[:,0,0],components[:,1,i])[0,1]):
print("components inverted")
print(i)
temp_comp1 = np.copy(components[:,1,i])
components[:,1,i] = components[:,0,i]
components[:,0,i] = temp_comp1
if np.abs(np.corrcoef(components[:,1,0],components[:,1,i])[0,1]) < np.abs(np.corrcoef(components[:,1,0],components[:,2,i])[0,1]):
print("components inverted")
print(i)
temp_comp2 = np.copy(components[:,2,i])
components[:,2,i] = components[:,1,i]
components[:,1,i] = temp_comp2
# Thesholded components (list of tuples (comp, threshold))
thresh_components = np.empty(components.shape)
thresholds = np.empty((N_COMP, N_FOLDS))
for l in range(N_FOLDS):
for k in range(N_COMP):
thresh_comp, t = array_utils.arr_threshold_from_norm2_ratio(
components[:, k, l],
.99)
thresh_components[:, k, l] = thresh_comp
thresholds[k, l] = t
# Average precision/recall across folds for each component
av_frobenius_train = frobenius_train.mean(axis=1)
av_frobenius_test = frobenius_test.mean(axis=1)
# Align sign of loading vectors to the first fold for each component
aligned_thresh_comp = np.copy(thresh_components)
REF_FOLD_NUMBER = 0
for k in range(N_COMP):
for i in range(N_FOLDS):
ref = thresh_components[:, k, REF_FOLD_NUMBER].T
if i != REF_FOLD_NUMBER:
r = np.corrcoef(thresh_components[:, k, i].T,
ref)
if r[0, 1] < 0:
#print "Reverting comp {k} of fold {i} for model {key}".format(i=i+1, k=k, key=key)
aligned_thresh_comp[:, k, i] *= -1
# Compute fleiss_kappa and DICE on thresholded components
fleiss_kappas = np.empty(N_COMP)
dice_bars = np.empty(N_COMP)
dices = np.zeros((10,N_COMP))
for k in range(N_COMP):
# One component accross folds
thresh_comp = aligned_thresh_comp[:, k, :]
fleiss_kappas[k] = metrics.fleiss_kappa(thresh_comp)
dice_bars[k],dices[:,k] = metrics.dice_bar(thresh_comp)
# print dices.mean(axis=1)
# dices_mean_path = os.path.join(OUTPUT_DIR,'fmri_5folds/results','dices_mean_%s.npy' %key[0])
# if key[0] == 'struct_pca' and key[2]==1e-6:
# dices_mean_path = os.path.join(OUTPUT_DIR,'fmri_5folds/results','dices_mean_%s.npy' %'enet_pca')
# print dices_mean_path
# np.save(dices_mean_path,dices.mean(axis=1) )
print(key)
scores = OrderedDict((
('model', key[0]),
('global_pen', key[1]),
('tv_ratio', key[2]),
('l1_ratio', key[3]),
('frobenius_train', av_frobenius_train[0]),
('frobenius_test', av_frobenius_test[0]),
('kappa_0', fleiss_kappas[0]),
('kappa_1', fleiss_kappas[1]),
('kappa_2', fleiss_kappas[2]),
('kappa_mean', np.mean(fleiss_kappas)),
('dice_bar_0', dice_bars[0]),
('dice_bar_1', dice_bars[1]),
('dice_bar_2', dice_bars[2]),
('dices_mean',dice_bars.mean()),
('evr_test_0',evr_test.mean(axis=1)[0]),
('evr_test_1',evr_test.mean(axis=1)[1]),
('evr_test_2',evr_test.mean(axis=1)[2]),
('evr_test_sum',evr_test.mean(axis=1)[0]+evr_test.mean(axis=1)[1]+evr_test.mean(axis=1)[2]),
('frob_test_fold1',frobenius_test[0][0]),
('frob_test_fold2',frobenius_test[0][1]),
('frob_test_fold3',frobenius_test[0][2]),
('frob_test_fold4',frobenius_test[0][3]),
('frob_test_fold5',frobenius_test[0][4])))
return scores
def run_test(wd, config):
print("In run_test")
import mapreduce
os.chdir(wd)
params = config['params'][-1]
key = '_'.join([str(p) for p in params])
load_globals(config)
OUTPUT = os.path.join('test', key)
oc = mapreduce.OutputCollector(OUTPUT)
X = np.load(config['data']['X'])
mapreduce.DATA_RESAMPLED = {}
mapreduce.DATA_RESAMPLED["X"] = [X, X]
mapper(params, oc)
def create_config(y, n_folds, output_dir, filename,
include_full_resample=True):
full_output_dir = os.path.join(OUTPUT_DIR, output_dir)
if not os.path.exists(full_output_dir):
os.makedirs(full_output_dir)
# skf = StratifiedKFold(y=y,
# n_folds=n_folds)
# resample_index = [[tr.tolist(), te.tolist()] for tr, te in skf]
resample_index = []
if include_full_resample:
resample_index.insert(0, None) # first fold is None
# Create config file
user_func_filename = os.path.abspath(__file__)
config = dict(data=dict(X="X_scz_only.npy"),
structure="mask.npy",
params=PARAMS,
resample=resample_index,
map_output="results",
n_folds=n_folds,
full_resample=include_full_resample,
user_func=user_func_filename,
reduce_group_by="params",
reduce_output="results.csv")
config_full_filename = os.path.join(full_output_dir, filename)
json.dump(config, open(config_full_filename, "w"))
return config
#################
# Actual script #
#################
if __name__ == "__main__":
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
#Retreive variables
X= np.load(INPUT_DATA_X)
y = np.ones(X.shape[0])
shutil.copy(INPUT_DATA_X, os.path.join(OUTPUT_DIR,"FS_pcatv_NMoprhCH_scz"))
shutil.copy(INPUT_MASK, os.path.join(OUTPUT_DIR,"FS_pcatv_NMoprhCH_scz"))
#############################################################################
# Create config files
config_5folds = create_config(y, *(CONFIGS[0]))
DEBUG = False
if DEBUG:
run_test(OUTPUT_DIR, config_5folds)
# Build utils files: sync (push/pull) and PBS
sync_push_filename, sync_pull_filename, WD_CLUSTER = \
clust_utils.gabriel_make_sync_data_files(os.path.join(OUTPUT_DIR,"FS_pcatv_NMoprhCH_scz"))
cmd = "mapreduce.py --map %s/config.json" % WD_CLUSTER
clust_utils.gabriel_make_qsub_job_files(os.path.join(OUTPUT_DIR,"FS_pcatv_NMoprhCH_scz"), cmd,walltime = "250:00:00") | [
"[email protected]"
] | |
41a5fb2910a6e40b7bd1be04cfbbdb0ef8e86f82 | 50f6ddd45ae2811ae4580ceb4a5617106145769f | /player461.py | 5fbfcb44c5bea39949159c1ff65d614cf4d54fcb | [] | no_license | MohammedFerozHussain/guvi1 | 432f89701b8d0d6e75229dbd199e362a42e2df53 | cd4ae6c903bd5de883234d076aec770e2c8bcd41 | refs/heads/master | 2020-04-15T05:10:19.398719 | 2019-08-17T18:16:39 | 2019-08-17T18:16:39 | 164,411,569 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | import math
A=int(input())          # angle in degrees
c=math.radians(A)       # convert degrees to radians
x=math.sin(c)           # sine of the angle
c1=(round(x,1))         # rounded to one decimal place
k=abs(c1)
if k<1 :
    print(c1)           # fractional result, e.g. 0.5 for 30 degrees
else:
    print(round(c1))    # print +1 or -1 as an integer when the rounded sine is exactly 1 or -1
| [
"[email protected]"
] | |
4108e3aeb18c96e6d9ded60bf08972664cb1c6bc | 941cbcc815da9927c16291fd0cf341fdf26d4b4b | /Testing/Selenium/200518_First_demo.py | c33183c4fbe3ba07578a79c3b87637358574efb9 | [] | no_license | narru888/PythonWork-py37- | 27de004157efdf42972f66b20872e17de8bc676c | f9cb1670fb84b9eb8aaaf7cd5cf9139ab4ef4053 | refs/heads/master | 2022-12-09T04:58:06.068302 | 2020-09-23T09:47:40 | 2020-09-23T09:47:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,787 | py | from selenium import webdriver
# Start the browser driver and the browser; this returns a WebDriver object.
# The argument is the path to the Chrome driver executable.
wd = webdriver.Chrome(r'd:\Programming\WorkPlace\PythonWork(py37)\Testing\Selenium\chromedriver\chromedriver.exe')
# Navigate to a page.
wd.get(url='https://www.google.com.tw/')
# The web server may not have returned the page to the (user's) browser yet while our program
# already runs the next line of code, so the element we want would not be found.
# implicitly_wait(5) means: wait at most 5 seconds, searching again every 0.5 seconds; as soon as
# the element is found the next line runs, otherwise the search is retried after the next 0.5 seconds.
wd.implicitly_wait(5)
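# A hedged alternative sketch: instead of an implicit wait, an explicit wait blocks until a
# specific condition holds for a specific element. The locator (By.NAME, 'q') assumes Google's
# search box is named 'q'; that name is an illustrative assumption, not part of the original script.
# from selenium.webdriver.support.ui import WebDriverWait
# from selenium.webdriver.support import expected_conditions as EC
# from selenium.webdriver.common.by import By
# WebDriverWait(wd, 5).until(EC.presence_of_element_located((By.NAME, 'q')))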
"""1. 現在在Google首頁"""
"""
整體:
wd.implicitly_wait(5): 隱式等待
wd.get(url='https://www.google.com.tw/'): 打開網站
wd.title: 當前窗口的標記欄(google分頁上顯示的那個標題)
wd.quit(): 關閉瀏覽器驅動和瀏覽器
wd.current_window_handle: 當前窗口的handle
wd.switch_to.window(handle): 切換窗口
wd.switch_to.frame('frame的id或WebElement對象'): 切換frame
wd.switch_to.default_content(): 切換回到主Html
選擇元素:
find_element找不到對象返回異常;find_elements找不到對象返回[]。
通過class找第一個對象: find_element_by_class_name
通過class找所有對象(返回列表): find_elements_by_class_name
通過id找第一個對象: find_element_by_id
通過id找所有對象(返回列表): find_elements_by_id
通過標籤名(EX:div)找對象: find_element(s)_by_tag_name
操作元素:
點擊對象(通常用於button或a對象): WebElement對象.click()
輸入字符串: WebElement對象.send_keys()
獲取文本內容(介面上看的到的文本): WebElement對象.text
獲取元素屬性: WebElement對象.get_attribute(EX:'a')
獲取整個元素對應的HTML文本內容: WebElement對象.get_attribute('outerHTML')
獲取此元素内部所有的HTML文本內容: WebElement對象.get_attribute('innerHTML')
獲取輸入框裡面的文字(不能用.text獲取): WebElement對象.get_attribute('value')
獲取文本內容(介面上看不到的文本): WebElement對象.get_attribute('textContent'或'innerText')
CSS選擇器:
通過class找對象: find_element(s)_by_css_selector('.class值')
通過id找對象: find_element(s)_by_css_selector('#id值')
通過標籤名找對象: find_element(s)_by_css_selector('標籤名')
通過屬性找對象: find_element(s)_by_css_selector('[屬姓名]') EX: [href],也可以指定值[href="http://www.python3.vip/"]
通過屬性與其他方法混用:find_element(s)_by_css_selector('#id值[屬姓名]'或'.class值[屬姓名]'或'標籤名[屬姓名]')
獲取子元素對象: find_element(s)_by_css_selector('元素1 > 元素2') EX: #content > span 代表id=content的標籤中標籤名為span的子元素
獲取後代元素對象: find_element(s)_by_css_selector('元素1 元素2') EX: #content span 代表id=content的標籤中標籤名為span的後代元素
多個查詢加,號(代表或):find_element(s)_by_css_selector('#aa , div')
選擇第幾個子元素: find_element(s)_by_css_selector(':nth-child(數字)') EX: div:nth-child(2) 代表標簽名為div且剛好是在父元素中的第二子元素的標籤
選擇倒數第幾個子元素: find_element(s)_by_css_selector(':nth-last-child(數字)')
選擇第幾個div子元素: find_element(s)_by_css_selector('div:nth-of-type(數字)') EX:div:nth-of-type(2) 代表找出子元素中第二個標籤名為div的標籤
選擇倒數第幾個div子元素: find_element(s)_by_css_selector('div:nth-last-of-type(數字)')
選擇第奇數個子元素: find_element(s)_by_css_selector(':nth-child(odd)')
選擇第偶數個子元素: find_element(s)_by_css_selector(':nth-child(even)')
兄弟節點: find_element(s)_by_css_selector('元素1~元素2') EX: h3~span 代表找出兄弟節點中有h3的span標籤
相鄰節點: find_element(s)_by_css_selector('元素1+元素2') EX: h3+span 代表找出相鄰節點中有h3的span標籤
※ 在瀏覽器中也能透過CSS選擇器來進行查找(非常方便):
先按F12 -> 在上面一排選Elements -> 按 ctrl+f -> 輸入查詢條件
"""
# Returns a WebElement object.
ele_search = wd.find_element_by_class_name('gLFyf')
# print(ele_search)
# Type "白月黑羽" into the search box, followed by the Enter key (\n).
ele_search.send_keys('白月黑羽\n')
"""2. Now on the Google search-results page for 「白月黑羽」"""
div = wd.find_element_by_class_name('r')
div.find_element_by_tag_name('a').click()
"""3. Now on the 白月黑羽 home page"""
t = wd.find_element_by_css_selector('.nav-item')
print(t)
# Close the browser driver and the browser.
# wd.quit() | [
"[email protected]"
] | |
5f3faf4c4562e9b0e64d315ddcd5eee864cc3b85 | 3325f16c04ca8e641cbd58e396f983542b793091 | /Seção 10 - Epressões Lambdas e Funções Integradas/any_all.py | e496ebcb6cf885ddd9c9be5e515223aa5b9bd253 | [] | no_license | romulovieira777/Programacao_em_Python_Essencial | ac929fbbd6a002bcc689b8d6e54d46177632c169 | e81d219db773d562841203ea370bf4f098c4bd21 | refs/heads/master | 2023-06-11T16:06:36.971113 | 2021-07-06T20:57:25 | 2021-07-06T20:57:25 | 269,442,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,265 | py | """
Any and All
all() -> Returns True if every element of the iterable is truthy, or if the iterable is empty.
# all() examples
print(all([0, 1, 2, 3, 4])) # Are all the numbers truthy? False
print(all([1, 2, 3, 4])) # Are all the numbers truthy? True
print(all([])) # Are all the numbers truthy? True
print(all((1, 2, 3, 4))) # Are all the numbers truthy? True
print(all({1, 2, 3, 4})) # Are all the numbers truthy? True
print(all('Geek')) # Are all the characters truthy? True
nomes = ['Carlos', 'Camila', 'Carla', 'Cassiano', 'Cristina']
print(all([nome[0] == 'C' for nome in nomes]))
# NOTE: an empty iterable converted to boolean is False, but all() treats it as True
print(all([letra for letra in 'eio' if letra in 'aeiou']))
print(all([num for num in [4, 2, 10, 6, 8] if num % 2 == 0]))
any() -> Returns True if any element of the iterable is truthy. If the iterable is empty, it returns False
print(any([0, 1, 2, 3, 4])) # True
print(any([0, False, {}, (), []])) # False
nomes = ['Carlos', 'Camila', 'Carla', 'Cassiano', 'Cristina', 'Vanessa']
print(any([nome[0] == 'C' for nome in nomes]))
print(any([num for num in [4, 2, 10, 6, 8, 9] if num % 2 == 0]))
"""
| [
"[email protected]"
] |