Dataset schema. Each data row below the table describes one source file and follows this column order, ending with the content, avg_line_length, max_line_length and alphanum_fraction columns.

column | dtype | observed range / values
---|---|---
hexsha | string | length 40
size | int64 | 5 to 2.06M
ext | string | 11 distinct values
lang | string | 1 distinct value
max_stars_repo_path | string | length 3 to 251
max_stars_repo_name | string | length 4 to 130
max_stars_repo_head_hexsha | string | length 40 to 78
max_stars_repo_licenses | sequence | length 1 to 10
max_stars_count | int64 | 1 to 191k (nullable)
max_stars_repo_stars_event_min_datetime | string | length 24 (nullable)
max_stars_repo_stars_event_max_datetime | string | length 24 (nullable)
max_issues_repo_path | string | length 3 to 251
max_issues_repo_name | string | length 4 to 130
max_issues_repo_head_hexsha | string | length 40 to 78
max_issues_repo_licenses | sequence | length 1 to 10
max_issues_count | int64 | 1 to 116k (nullable)
max_issues_repo_issues_event_min_datetime | string | length 24 (nullable)
max_issues_repo_issues_event_max_datetime | string | length 24 (nullable)
max_forks_repo_path | string | length 3 to 251
max_forks_repo_name | string | length 4 to 130
max_forks_repo_head_hexsha | string | length 40 to 78
max_forks_repo_licenses | sequence | length 1 to 10
max_forks_count | int64 | 1 to 105k (nullable)
max_forks_repo_forks_event_min_datetime | string | length 24 (nullable)
max_forks_repo_forks_event_max_datetime | string | length 24 (nullable)
content | string | length 1 to 1.05M
avg_line_length | float64 | 1 to 1.02M
max_line_length | int64 | 3 to 1.04M
alphanum_fraction | float64 | 0 to 1
1389758aa9eb7eb25584e0a02ef64f27158cea18 | 2,395 | py | Python | cli/polyaxon/managers/cli.py | hackerwins/polyaxon | ff56a098283ca872abfbaae6ba8abba479ffa394 | [
"Apache-2.0"
] | null | null | null | cli/polyaxon/managers/cli.py | hackerwins/polyaxon | ff56a098283ca872abfbaae6ba8abba479ffa394 | [
"Apache-2.0"
] | null | null | null | cli/polyaxon/managers/cli.py | hackerwins/polyaxon | ff56a098283ca872abfbaae6ba8abba479ffa394 | [
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright 2019 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
from __future__ import absolute_import, division, print_function
from distutils.version import LooseVersion # pylint:disable=import-error
from polyaxon.managers.base import BaseConfigManager
from polyaxon.schemas.cli.cli_configuration import CliConfigurationConfig
| 32.808219 | 86 | 0.703967 |
13897ff894a1bd33edc2b806b99ab65957690746 | 413 | py | Python | duckql/properties/tests/test_null.py | fossabot/duckql-python | b4aead825ee456d9758db89830c7bca9d5d5106e | [
"MIT"
] | 4 | 2020-04-15T09:35:15.000Z | 2021-11-11T13:03:46.000Z | duckql/properties/tests/test_null.py | fossabot/duckql-python | b4aead825ee456d9758db89830c7bca9d5d5106e | [
"MIT"
] | 2 | 2020-04-08T12:10:56.000Z | 2020-04-15T09:14:44.000Z | duckql/properties/tests/test_null.py | fossabot/duckql-python | b4aead825ee456d9758db89830c7bca9d5d5106e | [
"MIT"
] | 1 | 2020-04-15T09:11:39.000Z | 2020-04-15T09:11:39.000Z |
import pytest
from duckql.properties import Null
| 19.666667 | 64 | 0.723971 |
138acf726b02bf36085bf40542bda0ebebd538c5 | 6,190 | py | Python | openbmc/build/tmp/deploy/sdk/witherspoon-2019-08-08/sysroots/armv6-openbmc-linux-gnueabi/usr/lib/gobject-introspection/giscanner/codegen.py | sotaoverride/backup | ca53a10b72295387ef4948a9289cb78ab70bc449 | [
"Apache-2.0"
] | null | null | null | openbmc/build/tmp/deploy/sdk/witherspoon-2019-08-08/sysroots/armv6-openbmc-linux-gnueabi/usr/lib/gobject-introspection/giscanner/codegen.py | sotaoverride/backup | ca53a10b72295387ef4948a9289cb78ab70bc449 | [
"Apache-2.0"
] | null | null | null | openbmc/build/tmp/deploy/sdk/witherspoon-2019-08-08/sysroots/armv6-openbmc-linux-gnueabi/usr/lib/gobject-introspection/giscanner/codegen.py | sotaoverride/backup | ca53a10b72295387ef4948a9289cb78ab70bc449 | [
"Apache-2.0"
] | null | null | null |
# -*- Mode: Python -*-
# GObject-Introspection - a framework for introspecting GObject libraries
# Copyright (C) 2010 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
import os
from contextlib import contextmanager
from . import ast
| 34.775281 | 85 | 0.593538 |
138ad53bc75698fb0a04af0266ae508da388a981 | 6,057 | py | Python | nevergrad/parametrization/utils.py | mehrdad-shokri/nevergrad | 7b68b00c158bf60544bc45997560edf733fb5812 | [
"MIT"
] | 2 | 2021-04-13T12:14:46.000Z | 2021-07-07T14:37:50.000Z | nevergrad/parametrization/utils.py | OregonWebSells/nevergrad | c2b2a0efdca29830ccc9182d8a7ba4d8695f698d | [
"MIT"
] | 1 | 2020-09-25T10:45:06.000Z | 2020-09-25T11:51:13.000Z | nevergrad/parametrization/utils.py | OregonWebSells/nevergrad | c2b2a0efdca29830ccc9182d8a7ba4d8695f698d | [
"MIT"
] | 1 | 2021-04-07T10:34:20.000Z | 2021-04-07T10:34:20.000Z |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import shutil
import tempfile
import subprocess
import typing as tp
from pathlib import Path
from nevergrad.common import tools as ngtools
# NOTE: orphaned fragment; the enclosing context-manager class (a temporary-directory
# copy helper whose __enter__ returns the copied path) is truncated from this excerpt.
def __enter__(self) -> Path:
    super().__enter__()
    return self.copyname
class FailedJobError(RuntimeError):
"""Job failed during processing
"""
class CommandFunction:
"""Wraps a command as a function in order to make sure it goes through the
pipeline and notify when it is finished.
The output is a string containing everything that has been sent to stdout
Parameters
----------
command: list
command to run, as a list
verbose: bool
prints the command and stdout at runtime
cwd: Path/str
path to the location where the command must run from
Returns
-------
str
Everything that has been sent to stdout
"""
def __init__(self, command: tp.List[str], verbose: bool = False,
             cwd: tp.Optional[tp.Union[str, Path]] = None,
             env: tp.Optional[tp.Dict[str, str]] = None) -> None:
    # Reconstructed from the class docstring above (the original __init__ is not shown
    # in this excerpt): it only stores the call parameters used by __call__.
    if not isinstance(command, list):
        raise TypeError("The command must be provided as a list")
    self.command = command
    self.verbose = verbose
    self.cwd = cwd if cwd is None else str(cwd)
    self.env = env
def __call__(self, *args: tp.Any, **kwargs: tp.Any) -> str:
"""Call the cammand line with addidional arguments
The keyword arguments will be sent as --{key}={val}
The logs are bufferized. They will be printed if the job fails, or sent as output of the function
Errors are provided with the internal stderr
"""
# TODO make the following command more robust (probably fails in multiple cases)
full_command = self.command + [str(x) for x in args] + ["--{}={}".format(x, y) for x, y in kwargs.items()]
if self.verbose:
print(f"The following command is sent: {full_command}")
outlines: tp.List[str] = []
with subprocess.Popen(full_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=False, cwd=self.cwd, env=self.env) as process:
try:
assert process.stdout is not None
for line in iter(process.stdout.readline, b''):
if not line:
break
outlines.append(line.decode().strip())
if self.verbose:
print(outlines[-1], flush=True)
except Exception: # pylint: disable=broad-except
process.kill()
process.wait()
raise FailedJobError("Job got killed for an unknown reason.")
stderr = process.communicate()[1] # we already got stdout
stdout = "\n".join(outlines)
retcode = process.poll()
if stderr and (retcode or self.verbose):
print(stderr.decode(), file=sys.stderr)
if retcode:
subprocess_error = subprocess.CalledProcessError(retcode, process.args, output=stdout, stderr=stderr)
raise FailedJobError(stderr.decode()) from subprocess_error
return stdout
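# Example (editor's sketch, not part of the original file): how a CommandFunction
# instance is meant to be used, assuming the reconstructed __init__ above.
if __name__ == "__main__":
    echo = CommandFunction(command=["echo", "hello"], verbose=True)
    output = echo("world", flag=1)  # runs: echo hello world --flag=1
    print(output)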
| 38.826923 | 133 | 0.639591 |
138b01aa9774bbead45a8dac1264c5149cf9f912 | 568 | py | Python | Section 20/2.Document-transfer_files.py | airbornum/-Complete-Python-Scripting-for-Automation | bc053444f8786259086269ca1713bdb10144dd74 | [
"MIT"
] | 18 | 2020-04-13T03:14:06.000Z | 2022-03-09T18:54:41.000Z | Section 20/2.Document-transfer_files.py | airbornum/-Complete-Python-Scripting-for-Automation | bc053444f8786259086269ca1713bdb10144dd74 | [
"MIT"
] | null | null | null | Section 20/2.Document-transfer_files.py | airbornum/-Complete-Python-Scripting-for-Automation | bc053444f8786259086269ca1713bdb10144dd74 | [
"MIT"
] | 22 | 2020-04-29T21:12:42.000Z | 2022-03-17T18:19:54.000Z |
import paramiko
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(hostname='54.165.97.91',username='ec2-user',password='paramiko123',port=22)
sftp_client=ssh.open_sftp()
#sftp_client.get('/home/ec2-user/paramiko_download.txt','paramiko_downloaded_file.txt')
#sftp_client.chdir("/home/ec2-user")
#print(sftp_client.getcwd())
#sftp_client.get('demo.txt','C:\\Users\\Automation\\Desktop\\download_file.txt')
sftp_client.put("transfer_files.py",'/home/ec2-user/transfer_files.py')
sftp_client.close()
ssh.close()
| 43.692308 | 88 | 0.769366 |
138ba740eae4da2fa0a99d533446f723a2531106 | 1,522 | py | Python | nimlime_core/utils/internal_tools.py | gmpreussner/Varriount.NimLime | 33da0424248bf9360c2a7cbca4a22da7a8020785 | [
"MIT"
] | null | null | null | nimlime_core/utils/internal_tools.py | gmpreussner/Varriount.NimLime | 33da0424248bf9360c2a7cbca4a22da7a8020785 | [
"MIT"
] | null | null | null | nimlime_core/utils/internal_tools.py | gmpreussner/Varriount.NimLime | 33da0424248bf9360c2a7cbca4a22da7a8020785 | [
"MIT"
] | null | null | null |
# coding=utf-8
"""
Internal tools for NimLime development & testing.
"""
from pprint import pprint
import sublime
try:
from cProfile import Profile
except ImportError:
from profile import Profile
from functools import wraps
from pstats import Stats
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
debug_on = False
if debug_on:
sublime.message_dialog("NimLime running in debug mode.")
# Debug printer
def print_debug(*args, **kwargs):
"""
Print when debugging.
:type args: Any
:type kwargs: Any
"""
if debug_on:
pprint(*args, **kwargs)
# Profiling functions
profiler = Profile()
profiler_running = False
def profile_func(func):
"""
Decorator which profiles a single function.
Call print_profile_data to print the collected data.
:type func: Callable
:rtype: Callable
"""
    @wraps(func)
    def _profile_wrapper(*args, **kwargs):
        # Reconstructed from the docstring (the original wrapper body is not shown in
        # this excerpt): run the wrapped call under the module-level profiler.
        return profiler.runcall(func, *args, **kwargs)
    return _profile_wrapper
def print_profile_data():
"""
Print the collected profile data.
"""
stream = StringIO()
statistics = Stats(profiler, stream=stream)
statistics.sort_stats('cumulative')
statistics.print_stats()
print(stream.getvalue())
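# Example (editor's sketch, not part of the original file): profiling a function with
# the decorator above and then dumping the collected statistics.
@profile_func
def _example_work():
    return sum(i * i for i in range(10000))
# _example_work()       # run some profiled calls...
# print_profile_data()  # ...then print the accumulated statistics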
| 20.849315 | 60 | 0.653088 |
138c6bcf0225f274dd1eb1c256462cdafdb949eb | 2,524 | py | Python | test/unit/test_monitor.py | dmvieira/driftage | 830188aa341029cc2a643b2b3b50e625953a35eb | [
"Apache-2.0"
] | 4 | 2020-09-24T23:59:54.000Z | 2020-09-27T16:43:37.000Z | test/unit/test_monitor.py | dmvieira/driftage | 830188aa341029cc2a643b2b3b50e625953a35eb | [
"Apache-2.0"
] | 2 | 2021-03-06T19:55:34.000Z | 2021-03-06T20:06:42.000Z | test/unit/test_monitor.py | dmvieira/driftage | 830188aa341029cc2a643b2b3b50e625953a35eb | [
"Apache-2.0"
] | null | null | null |
import orjson
from asynctest import TestCase, Mock, patch
from freezegun import freeze_time
from driftage.monitor import Monitor
| 31.160494 | 75 | 0.587163 |
138d56c884e89de3d6a25a794b256d4f746b9c4d | 354 | py | Python | examples/todo_advanced/main.py | travisluong/fastarg | b21d5307ce6b296aa16f30bf220ca2ead8e9d4d3 | [
"MIT"
] | 1 | 2022-03-27T20:30:45.000Z | 2022-03-27T20:30:45.000Z | examples/todo_advanced/main.py | travisluong/fastarg | b21d5307ce6b296aa16f30bf220ca2ead8e9d4d3 | [
"MIT"
] | null | null | null | examples/todo_advanced/main.py | travisluong/fastarg | b21d5307ce6b296aa16f30bf220ca2ead8e9d4d3 | [
"MIT"
] | null | null | null |
import fastarg
import commands.todo as todo
import commands.user as user
app = fastarg.Fastarg(description="productivity app", prog="todo")
app.add_fastarg(todo.app, name="todo")
app.add_fastarg(user.app, name="user")
if __name__ == "__main__":
app.run()
| 22.125 | 66 | 0.70339 |
138da1de200b7ec195fd8cfe7cb64a50fd1f3486 | 7,177 | py | Python | tests/test_channel.py | rwilhelm/aiormq | 9aa278e61d16ba18748f5f5a3fc76d0a273fd14a | [
"Apache-2.0"
] | 176 | 2019-01-13T13:41:43.000Z | 2022-03-26T04:01:03.000Z | tests/test_channel.py | rwilhelm/aiormq | 9aa278e61d16ba18748f5f5a3fc76d0a273fd14a | [
"Apache-2.0"
] | 79 | 2019-02-18T17:41:25.000Z | 2022-02-25T11:09:33.000Z | tests/test_channel.py | rwilhelm/aiormq | 9aa278e61d16ba18748f5f5a3fc76d0a273fd14a | [
"Apache-2.0"
] | 54 | 2019-02-19T09:53:12.000Z | 2022-03-28T13:33:29.000Z |
import asyncio
import uuid
import pytest
from aiomisc_pytest.pytest_plugin import TCPProxy
import aiormq
| 34.671498 | 80 | 0.722029 |
138dbf1fa3b2ac4c4311547dd1bdd7a67de14e0b | 39 | py | Python | nitro-python/nssrc/com/citrix/netscaler/nitro/resource/stat/mediaclassification/__init__.py | culbertm/NSttyPython | ff9f6aedae3fb8495342cd0fc4247c819cf47397 | [
"Apache-2.0"
] | 2 | 2020-08-24T18:04:22.000Z | 2020-08-24T18:04:47.000Z | nitro/resource/stat/mediaclassification/__init__.py | HanseMerkur/nitro-python | d03eb11f492a35a2a8b2a140322fbce22d25a8f7 | [
"Apache-2.0"
] | null | null | null | nitro/resource/stat/mediaclassification/__init__.py | HanseMerkur/nitro-python | d03eb11f492a35a2a8b2a140322fbce22d25a8f7 | [
"Apache-2.0"
] | null | null | null |
__all__ = ['mediaclassification_stats']
| 39 | 39 | 0.820513 |
138e485745a6d26b22140e7cd765e64928978552 | 455 | py | Python | balanced_parens.py | joeghodsi/interview-questions | 3e4eb76891245ce978cb9171e87d60e3b292b0a8 | [
"Unlicense"
] | 1 | 2018-06-11T18:18:39.000Z | 2018-06-11T18:18:39.000Z | balanced_parens.py | joeghodsi/interview-questions | 3e4eb76891245ce978cb9171e87d60e3b292b0a8 | [
"Unlicense"
] | null | null | null | balanced_parens.py | joeghodsi/interview-questions | 3e4eb76891245ce978cb9171e87d60e3b292b0a8 | [
"Unlicense"
] | null | null | null |
'''
Problem description:
Given a string, determine whether or not the parentheses are balanced
'''
def balanced_parens(str):
'''
runtime: O(n)
space : O(1)
'''
if str is None:
return True
open_count = 0
for char in str:
if char == '(':
open_count += 1
elif char == ')':
open_count -= 1
if open_count < 0:
return False
return open_count == 0
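# A few quick checks of the function above (editor's addition, not part of the original file):
if __name__ == '__main__':
    assert balanced_parens(None) is True
    assert balanced_parens('') is True
    assert balanced_parens('(()())') is True
    assert balanced_parens('(()') is False
    assert balanced_parens(')(') is False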
| 17.5 | 69 | 0.514286 |
138f08438e2c276d1577956212792c686c9d877c | 6,333 | py | Python | plaso/parsers/winreg_plugins/ccleaner.py | pyllyukko/plaso | 7533db2d1035ca71d264d6281ebd5db2d073c587 | [
"Apache-2.0"
] | 1,253 | 2015-01-02T13:58:02.000Z | 2022-03-31T08:43:39.000Z | plaso/parsers/winreg_plugins/ccleaner.py | pyllyukko/plaso | 7533db2d1035ca71d264d6281ebd5db2d073c587 | [
"Apache-2.0"
] | 3,388 | 2015-01-02T11:17:58.000Z | 2022-03-30T10:21:45.000Z | plaso/parsers/winreg_plugins/ccleaner.py | pyllyukko/plaso | 7533db2d1035ca71d264d6281ebd5db2d073c587 | [
"Apache-2.0"
] | 376 | 2015-01-20T07:04:54.000Z | 2022-03-04T23:53:00.000Z |
# -*- coding: utf-8 -*-
"""Parser for the CCleaner Registry key."""
import re
from dfdatetime import time_elements as dfdatetime_time_elements
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import winreg_parser
from plaso.parsers.winreg_plugins import interface
# The CCleanerPlugin class registered here is defined in the original module; its
# definition is not included in this excerpt.
winreg_parser.WinRegistryParser.RegisterPlugin(CCleanerPlugin)
| 33.331579 | 92 | 0.707879 |
139025981058af435a2b6721ba93a59e2cb0b119 | 1,296 | py | Python | pushpluck/base.py | ejconlon/pushpluck | 4e5b8bcff6fe3955e8f25638268569f901815b5a | [
"MIT"
] | null | null | null | pushpluck/base.py | ejconlon/pushpluck | 4e5b8bcff6fe3955e8f25638268569f901815b5a | [
"MIT"
] | 2 | 2021-04-02T03:54:12.000Z | 2021-04-23T18:23:03.000Z | pushpluck/base.py | ejconlon/pushpluck | 4e5b8bcff6fe3955e8f25638268569f901815b5a | [
"MIT"
] | null | null | null |
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass
from typing import Any, TypeVar
X = TypeVar('X')
# The Unit class instantiated here is defined in the original module; its definition
# is not included in this excerpt.
_UNIT_SINGLETON = Unit()
| 24.923077 | 90 | 0.657407 |
139141d96476a62cf2a8695abfe754ae15ba668e | 2,057 | py | Python | test/cuberead/highres/test_default_high_res.py | CAB-LAB/cube-performance-test | 0ca7dbb56b2937004fb63f8aafdff21fb76263d4 | [
"MIT"
] | null | null | null | test/cuberead/highres/test_default_high_res.py | CAB-LAB/cube-performance-test | 0ca7dbb56b2937004fb63f8aafdff21fb76263d4 | [
"MIT"
] | null | null | null | test/cuberead/highres/test_default_high_res.py | CAB-LAB/cube-performance-test | 0ca7dbb56b2937004fb63f8aafdff21fb76263d4 | [
"MIT"
] | null | null | null |
import time
import pytest
from test import config
from test.cube_utils import CubeUtils
ITERATIONS_NUM = getattr(config, 'iterations_num', 1)
ROUNDS_NUM = getattr(config, 'rounds_num', 10)
| 33.177419 | 114 | 0.684492 |
139180db13c406acb2f2910c85de0dfd5a8d2472 | 5,545 | py | Python | tests/components/test_dialogue_flow.py | dyoshiha/mindmeld | 95f0e8482594f00040766a2ee687e9c9338f5a74 | [
"Apache-2.0"
] | 1 | 2019-12-12T12:44:33.000Z | 2019-12-12T12:44:33.000Z | tests/components/test_dialogue_flow.py | AravindR7/mindmeld | 470bba73ac56b6388146212ddaf697097e81cec3 | [
"Apache-2.0"
] | null | null | null | tests/components/test_dialogue_flow.py | AravindR7/mindmeld | 470bba73ac56b6388146212ddaf697097e81cec3 | [
"Apache-2.0"
] | null | null | null |
import pytest
from mindmeld.components import Conversation
def assert_reply(directives, templates, *, start_index=0, slots=None):
"""Asserts that the provided directives contain the specified reply
Args:
directives (list[dict[str, dict]]): list of directives returned by application
templates (Union[str, Set[str]]): The reply must be a member of this set.
start_index (int, optional): The index of the first client action associated
with this reply.
slots (dict, optional): The slots to fill the templates
"""
slots = slots or {}
if isinstance(templates, str):
templates = [templates]
texts = set(map(lambda x: x.format(**slots), templates))
assert len(directives) >= start_index + 1
assert directives[start_index]['name'] == 'reply'
assert directives[start_index]['payload']['text'] in texts
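def test_assert_reply_example():
    # Editor's sketch (not part of the original suite): minimal illustration of the
    # directive structure that assert_reply expects.
    directives = [{'name': 'reply', 'payload': {'text': 'Hello, Alice!'}}]
    assert_reply(directives, 'Hello, {name}!', slots={'name': 'Alice'})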
def assert_dialogue_state(dm, dialogue_state):
for rule in dm.rules:
if rule.dialogue_state == dialogue_state:
return True
return False
def test_dialogue_flow_async(async_kwik_e_mart_app):
assert some_handler.flow_state == 'some_handler_flow'
assert 'some_handler' in some_handler.all_flows
dm = some_handler.dialogue_manager
assert_dialogue_state(dm, 'some_handler')
assert_dialogue_state(dm, 'some_handler_flow')
assert len(some_handler.rules) == 0
assert len(some_handler.rules) == 1
assert len(some_handler.rules) == 2
assert 'some_flow_handler_2' in some_handler.exit_flow_states
def test_dialogue_flow(kwik_e_mart_app):
assert some_handler.flow_state == 'some_handler_flow'
assert 'some_handler' in some_handler.all_flows
dm = some_handler.dialogue_manager
assert_dialogue_state(dm, 'some_handler')
assert_dialogue_state(dm, 'some_handler_flow')
assert len(some_handler.rules) == 0
assert len(some_handler.rules) == 1
assert len(some_handler.rules) == 2
assert 'some_flow_handler_2' in some_handler.exit_flow_states
| 39.326241 | 99 | 0.741389 |
1395c34c642a4ba06cd80eeb8c512c19499d8a1b | 1,707 | py | Python | mine/src/main/python/SVM.py | nextzlog/mine | 49ef0bea4796920d8696dc5f076f86c0ab17be80 | [
"BSD-3-Clause"
] | 3 | 2020-06-04T15:25:37.000Z | 2020-06-06T05:09:07.000Z | mine/src/main/python/SVM.py | nextzlog/mine | 49ef0bea4796920d8696dc5f076f86c0ab17be80 | [
"BSD-3-Clause"
] | null | null | null | mine/src/main/python/SVM.py | nextzlog/mine | 49ef0bea4796920d8696dc5f076f86c0ab17be80 | [
"BSD-3-Clause"
] | null | null | null |
import os,sys
import webbrowser
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.cm as cm
import matplotlib.pylab as plt
from matplotlib import ticker
plt.rcParams['font.family'] = 'monospace'
fig = plt.figure()
rect = fig.add_subplot(111, aspect='equal')
data0 = np.loadtxt('data0.dat', delimiter=',')
data1 = np.loadtxt('data1.dat', delimiter=',')
dense = np.loadtxt('dense.dat', delimiter=',')
ID = sys.argv[1]
X = np.arange(-2.0, 2.05, 0.05)
Y = np.arange(-2.0, 2.05, 0.05)
Xm, Ym = np.meshgrid(X, Y)
vmin, vmax = dense.min(), dense.max()
if vmin * vmax < 0:
vmin = -abs(max(-vmin, vmax))
vmax = +abs(max(-vmin, vmax))
cr = rect.imshow(dense.reshape((len(Y), len(X))), extent=(X[0], X[-1], Y[0], Y[-1]), vmin=vmin, vmax=vmax, cmap=cm.coolwarm, origin='lower')
plt.contour(Xm, Ym, dense, levels=[-1, 1], cmap=cm.bwr, linestyles='dashed', linewidths=[2,2])
plt.contour(Xm, Ym, dense, levels=[0], colors='black', linestyles='dashed', linewidths=[2])
cb = plt.colorbar(cr, format='%+.1e')
cb.solids.set_edgecolor('face')
cb.set_ticks(ticker.LinearLocator(6))
cb.ax.tick_params(labelsize=12)
rect.scatter(data0[:,0], data0[:,1], marker='v', facecolor='red', edgecolor='black', s=30, lw=1)
rect.scatter(data1[:,0], data1[:,1], marker='^', facecolor='blue', edgecolor='black', s=30, lw=1)
plt.xlim(X[0], X[-1])
plt.ylim(Y[0], Y[-1])
plt.xlabel("")
plt.ylabel("")
plt.grid(ls='dotted')
plt.savefig('{}.svg'.format(ID), bbox_inches='tight', pad_inches=0.1)
plt.savefig('{}.eps'.format(ID), bbox_inches='tight', pad_inches=0.1)
os.remove('dense.dat')
os.remove('data0.dat')
os.remove('data1.dat')
webbrowser.open('file://{}'.format(os.path.realpath('{}.svg'.format(sys.argv[1]))))
| 38.795455 | 140 | 0.671353 |
1395df5d636c77222d35d82dfd590f6aefe93e2b | 455 | py | Python | sarna/report_generator/scores.py | rsrdesarrollo/sarna | 0c1f44e06a932520b70e505585a5469b77f6302e | [
"MIT"
] | 25 | 2019-03-11T22:42:52.000Z | 2022-03-15T09:49:15.000Z | sarna/report_generator/scores.py | hackingmess/sarna | 0c1f44e06a932520b70e505585a5469b77f6302e | [
"MIT"
] | 14 | 2019-01-08T08:35:51.000Z | 2022-03-11T23:30:28.000Z | sarna/report_generator/scores.py | hackingmess/sarna | 0c1f44e06a932520b70e505585a5469b77f6302e | [
"MIT"
] | 12 | 2019-07-26T05:38:32.000Z | 2022-03-29T09:54:49.000Z | from sarna.model.enums import Score, Language
from sarna.report_generator import make_run
from sarna.report_generator.locale_choice import locale_choice
from sarna.report_generator.style import RenderStyle
| 32.5 | 82 | 0.764835 |
139756e066bb02143bb1f17cfc6e0e0c48ac0c56 | 20,000 | py | Python | tests/hwsim/test_ap_open.py | waittrue/wireless | 3c64f015dc62aec4da0b696f45cc4bcf41594c5d | [
"Unlicense"
] | 1 | 2016-04-22T19:32:57.000Z | 2016-04-22T19:32:57.000Z | tests/hwsim/test_ap_open.py | Acidburn0zzz/third_party-hostap | 0542463c4de76fde6e8164f75b3a52ce0ddd8087 | [
"Unlicense"
] | null | null | null | tests/hwsim/test_ap_open.py | Acidburn0zzz/third_party-hostap | 0542463c4de76fde6e8164f75b3a52ce0ddd8087 | [
"Unlicense"
] | null | null | null | # Open mode AP tests
# Copyright (c) 2014, Qualcomm Atheros, Inc.
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import logging
logger = logging.getLogger()
import struct
import subprocess
import time
import os
import hostapd
import hwsim_utils
from tshark import run_tshark
from utils import alloc_fail
from wpasupplicant import WpaSupplicant
def test_ap_open(dev, apdev):
"""AP with open mode (no security) configuration"""
hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412",
bg_scan_period="0")
ev = hapd.wait_event([ "AP-STA-CONNECTED" ], timeout=5)
if ev is None:
raise Exception("No connection event received from hostapd")
hwsim_utils.test_connectivity(dev[0], hapd)
dev[0].request("DISCONNECT")
ev = hapd.wait_event([ "AP-STA-DISCONNECTED" ], timeout=5)
if ev is None:
raise Exception("No disconnection event received from hostapd")
def test_ap_open_packet_loss(dev, apdev):
"""AP with open mode configuration and large packet loss"""
params = { "ssid": "open",
"ignore_probe_probability": "0.5",
"ignore_auth_probability": "0.5",
"ignore_assoc_probability": "0.5",
"ignore_reassoc_probability": "0.5" }
hapd = hostapd.add_ap(apdev[0]['ifname'], params)
for i in range(0, 3):
dev[i].connect("open", key_mgmt="NONE", scan_freq="2412",
wait_connect=False)
for i in range(0, 3):
dev[i].wait_connected(timeout=20)
def test_ap_open_unknown_action(dev, apdev):
"""AP with open mode configuration and unknown Action frame"""
hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412")
bssid = apdev[0]['bssid']
cmd = "MGMT_TX {} {} freq=2412 action=765432".format(bssid, bssid)
if "FAIL" in dev[0].request(cmd):
raise Exception("Could not send test Action frame")
ev = dev[0].wait_event(["MGMT-TX-STATUS"], timeout=10)
if ev is None:
raise Exception("Timeout on MGMT-TX-STATUS")
if "result=SUCCESS" not in ev:
raise Exception("AP did not ack Action frame")
def test_ap_open_invalid_wmm_action(dev, apdev):
"""AP with open mode configuration and invalid WMM Action frame"""
hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412")
bssid = apdev[0]['bssid']
cmd = "MGMT_TX {} {} freq=2412 action=1100".format(bssid, bssid)
if "FAIL" in dev[0].request(cmd):
raise Exception("Could not send test Action frame")
ev = dev[0].wait_event(["MGMT-TX-STATUS"], timeout=10)
if ev is None or "result=SUCCESS" not in ev:
raise Exception("AP did not ack Action frame")
def test_ap_open_reconnect_on_inactivity_disconnect(dev, apdev):
"""Reconnect to open mode AP after inactivity related disconnection"""
hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412")
hapd.request("DEAUTHENTICATE " + dev[0].p2p_interface_addr() + " reason=4")
dev[0].wait_disconnected(timeout=5)
dev[0].wait_connected(timeout=2, error="Timeout on reconnection")
def test_ap_open_assoc_timeout(dev, apdev):
"""AP timing out association"""
ssid = "test"
hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
dev[0].scan(freq="2412")
hapd.set("ext_mgmt_frame_handling", "1")
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412",
wait_connect=False)
for i in range(0, 10):
req = hapd.mgmt_rx()
if req is None:
raise Exception("MGMT RX wait timed out")
if req['subtype'] == 11:
break
req = None
if not req:
raise Exception("Authentication frame not received")
resp = {}
resp['fc'] = req['fc']
resp['da'] = req['sa']
resp['sa'] = req['da']
resp['bssid'] = req['bssid']
resp['payload'] = struct.pack('<HHH', 0, 2, 0)
hapd.mgmt_tx(resp)
assoc = 0
for i in range(0, 10):
req = hapd.mgmt_rx()
if req is None:
raise Exception("MGMT RX wait timed out")
if req['subtype'] == 0:
assoc += 1
if assoc == 3:
break
if assoc != 3:
raise Exception("Association Request frames not received: assoc=%d" % assoc)
hapd.set("ext_mgmt_frame_handling", "0")
dev[0].wait_connected(timeout=15)
def test_ap_open_id_str(dev, apdev):
"""AP with open mode and id_str"""
hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412", id_str="foo",
wait_connect=False)
ev = dev[0].wait_connected(timeout=10)
if "id_str=foo" not in ev:
raise Exception("CTRL-EVENT-CONNECT did not have matching id_str: " + ev)
if dev[0].get_status_field("id_str") != "foo":
raise Exception("id_str mismatch")
def test_ap_open_select_any(dev, apdev):
"""AP with open mode and select any network"""
hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
id = dev[0].connect("unknown", key_mgmt="NONE", scan_freq="2412",
only_add_network=True)
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412",
only_add_network=True)
dev[0].select_network(id)
ev = dev[0].wait_event(["CTRL-EVENT-NETWORK-NOT-FOUND",
"CTRL-EVENT-CONNECTED"], timeout=10)
if ev is None:
raise Exception("No result reported")
if "CTRL-EVENT-CONNECTED" in ev:
raise Exception("Unexpected connection")
dev[0].select_network("any")
dev[0].wait_connected(timeout=10)
def test_ap_open_unexpected_assoc_event(dev, apdev):
"""AP with open mode and unexpected association event"""
hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412")
dev[0].request("DISCONNECT")
dev[0].wait_disconnected(timeout=15)
dev[0].dump_monitor()
# This will be accepted due to matching network
subprocess.call(['iw', 'dev', dev[0].ifname, 'connect', 'open', "2412",
apdev[0]['bssid']])
dev[0].wait_connected(timeout=15)
dev[0].dump_monitor()
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected(timeout=5)
dev[0].dump_monitor()
# This will result in disconnection due to no matching network
subprocess.call(['iw', 'dev', dev[0].ifname, 'connect', 'open', "2412",
apdev[0]['bssid']])
dev[0].wait_disconnected(timeout=15)
def test_ap_bss_load(dev, apdev):
"""AP with open mode (no security) configuration"""
hapd = hostapd.add_ap(apdev[0]['ifname'],
{ "ssid": "open",
"bss_load_update_period": "10" })
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412")
# this does not really get much useful output with mac80211_hwsim currently,
# but run through the channel survey update couple of times
for i in range(0, 10):
hwsim_utils.test_connectivity(dev[0], hapd)
hwsim_utils.test_connectivity(dev[0], hapd)
hwsim_utils.test_connectivity(dev[0], hapd)
time.sleep(0.15)
def test_ap_open_out_of_memory(dev, apdev):
"""hostapd failing to setup interface due to allocation failure"""
hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
hapd_out_of_mem(hapd, apdev[1], 1, "hostapd_alloc_bss_data")
for i in range(1, 3):
hapd_out_of_mem(hapd, apdev[1], i, "hostapd_iface_alloc")
for i in range(1, 5):
hapd_out_of_mem(hapd, apdev[1], i, "hostapd_config_defaults;hostapd_config_alloc")
hapd_out_of_mem(hapd, apdev[1], 1, "hostapd_config_alloc")
hapd_out_of_mem(hapd, apdev[1], 1, "hostapd_driver_init")
for i in range(1, 4):
hapd_out_of_mem(hapd, apdev[1], i, "=wpa_driver_nl80211_drv_init")
# eloop_register_read_sock() call from i802_init()
hapd_out_of_mem(hapd, apdev[1], 1, "eloop_sock_table_add_sock;eloop_register_sock;?eloop_register_read_sock;=i802_init")
# verify that a new interface can still be added when memory allocation does
# not fail
hostapd.add_ap(apdev[1]['ifname'], { "ssid": "open" })
def test_bssid_black_white_list(dev, apdev):
"""BSSID black/white list"""
hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
hapd2 = hostapd.add_ap(apdev[1]['ifname'], { "ssid": "open" })
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412",
bssid_whitelist=apdev[1]['bssid'])
dev[1].connect("open", key_mgmt="NONE", scan_freq="2412",
bssid_blacklist=apdev[1]['bssid'])
dev[2].connect("open", key_mgmt="NONE", scan_freq="2412",
bssid_whitelist="00:00:00:00:00:00/00:00:00:00:00:00",
bssid_blacklist=apdev[1]['bssid'])
if dev[0].get_status_field('bssid') != apdev[1]['bssid']:
raise Exception("dev[0] connected to unexpected AP")
if dev[1].get_status_field('bssid') != apdev[0]['bssid']:
raise Exception("dev[1] connected to unexpected AP")
if dev[2].get_status_field('bssid') != apdev[0]['bssid']:
raise Exception("dev[2] connected to unexpected AP")
dev[0].request("REMOVE_NETWORK all")
dev[1].request("REMOVE_NETWORK all")
dev[2].request("REMOVE_NETWORK all")
dev[2].connect("open", key_mgmt="NONE", scan_freq="2412",
bssid_whitelist="00:00:00:00:00:00", wait_connect=False)
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412",
bssid_whitelist="11:22:33:44:55:66/ff:00:00:00:00:00 " + apdev[1]['bssid'] + " aa:bb:cc:dd:ee:ff")
dev[1].connect("open", key_mgmt="NONE", scan_freq="2412",
bssid_blacklist="11:22:33:44:55:66/ff:00:00:00:00:00 " + apdev[1]['bssid'] + " aa:bb:cc:dd:ee:ff")
if dev[0].get_status_field('bssid') != apdev[1]['bssid']:
raise Exception("dev[0] connected to unexpected AP")
if dev[1].get_status_field('bssid') != apdev[0]['bssid']:
raise Exception("dev[1] connected to unexpected AP")
dev[0].request("REMOVE_NETWORK all")
dev[1].request("REMOVE_NETWORK all")
ev = dev[2].wait_event(["CTRL-EVENT-CONNECTED"], timeout=0.1)
if ev is not None:
raise Exception("Unexpected dev[2] connectin")
dev[2].request("REMOVE_NETWORK all")
def test_ap_open_wpas_in_bridge(dev, apdev):
"""Open mode AP and wpas interface in a bridge"""
br_ifname='sta-br0'
ifname='wlan5'
try:
_test_ap_open_wpas_in_bridge(dev, apdev)
finally:
subprocess.call(['ip', 'link', 'set', 'dev', br_ifname, 'down'])
subprocess.call(['brctl', 'delif', br_ifname, ifname])
subprocess.call(['brctl', 'delbr', br_ifname])
subprocess.call(['iw', ifname, 'set', '4addr', 'off'])
def test_ap_open_start_disabled(dev, apdev):
"""AP with open mode and beaconing disabled"""
hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open",
"start_disabled": "1" })
bssid = apdev[0]['bssid']
dev[0].flush_scan_cache()
dev[0].scan(freq=2412, only_new=True)
if dev[0].get_bss(bssid) is not None:
raise Exception("AP was seen beaconing")
if "OK" not in hapd.request("RELOAD"):
raise Exception("RELOAD failed")
dev[0].scan_for_bss(bssid, freq=2412)
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412")
def test_ap_open_start_disabled2(dev, apdev):
"""AP with open mode and beaconing disabled (2)"""
hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open",
"start_disabled": "1" })
bssid = apdev[0]['bssid']
dev[0].flush_scan_cache()
dev[0].scan(freq=2412, only_new=True)
if dev[0].get_bss(bssid) is not None:
raise Exception("AP was seen beaconing")
if "OK" not in hapd.request("UPDATE_BEACON"):
raise Exception("UPDATE_BEACON failed")
dev[0].scan_for_bss(bssid, freq=2412)
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412")
if "OK" not in hapd.request("UPDATE_BEACON"):
raise Exception("UPDATE_BEACON failed")
dev[0].request("DISCONNECT")
dev[0].wait_disconnected()
dev[0].request("RECONNECT")
dev[0].wait_connected()
def test_ap_open_ifdown(dev, apdev):
"""AP with open mode and external ifconfig down"""
params = { "ssid": "open",
"ap_max_inactivity": "1" }
hapd = hostapd.add_ap(apdev[0]['ifname'], params)
bssid = apdev[0]['bssid']
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412")
dev[1].connect("open", key_mgmt="NONE", scan_freq="2412")
subprocess.call(['ip', 'link', 'set', 'dev', apdev[0]['ifname'], 'down'])
ev = hapd.wait_event(["AP-STA-DISCONNECTED"], timeout=10)
if ev is None:
raise Exception("Timeout on AP-STA-DISCONNECTED (1)")
ev = hapd.wait_event(["AP-STA-DISCONNECTED"], timeout=5)
if ev is None:
raise Exception("Timeout on AP-STA-DISCONNECTED (2)")
ev = hapd.wait_event(["INTERFACE-DISABLED"], timeout=5)
if ev is None:
raise Exception("No INTERFACE-DISABLED event")
# The following wait tests beacon loss detection in mac80211 on dev0.
# dev1 is used to test stopping of AP side functionality on client polling.
dev[1].request("REMOVE_NETWORK all")
subprocess.call(['ip', 'link', 'set', 'dev', apdev[0]['ifname'], 'up'])
dev[0].wait_disconnected()
dev[1].wait_disconnected()
ev = hapd.wait_event(["INTERFACE-ENABLED"], timeout=10)
if ev is None:
raise Exception("No INTERFACE-ENABLED event")
dev[0].wait_connected()
hwsim_utils.test_connectivity(dev[0], hapd)
def test_ap_open_disconnect_in_ps(dev, apdev, params):
"""Disconnect with the client in PS to regression-test a kernel bug"""
hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412",
bg_scan_period="0")
ev = hapd.wait_event([ "AP-STA-CONNECTED" ], timeout=5)
if ev is None:
raise Exception("No connection event received from hostapd")
time.sleep(0.2)
hwsim_utils.set_powersave(dev[0], hwsim_utils.PS_MANUAL_POLL)
try:
# inject some traffic
sa = hapd.own_addr()
da = dev[0].own_addr()
hapd.request('DATA_TEST_CONFIG 1')
hapd.request('DATA_TEST_TX {} {} 0'.format(da, sa))
hapd.request('DATA_TEST_CONFIG 0')
# let the AP send couple of Beacon frames
time.sleep(0.3)
# disconnect - with traffic pending - shouldn't cause kernel warnings
dev[0].request("DISCONNECT")
finally:
hwsim_utils.set_powersave(dev[0], hwsim_utils.PS_DISABLED)
time.sleep(0.2)
out = run_tshark(os.path.join(params['logdir'], "hwsim0.pcapng"),
"wlan_mgt.tim.partial_virtual_bitmap",
["wlan_mgt.tim.partial_virtual_bitmap"])
if out is not None:
state = 0
for l in out.splitlines():
pvb = int(l, 16)
if pvb > 0 and state == 0:
state = 1
elif pvb == 0 and state == 1:
state = 2
if state != 2:
raise Exception("Didn't observe TIM bit getting set and unset (state=%d)" % state)
def test_ap_open_select_network(dev, apdev):
"""Open mode connection and SELECT_NETWORK to change network"""
hapd1 = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
bssid1 = apdev[0]['bssid']
hapd2 = hostapd.add_ap(apdev[1]['ifname'], { "ssid": "open2" })
bssid2 = apdev[1]['bssid']
id1 = dev[0].connect("open", key_mgmt="NONE", scan_freq="2412",
only_add_network=True)
id2 = dev[0].connect("open2", key_mgmt="NONE", scan_freq="2412")
hwsim_utils.test_connectivity(dev[0], hapd2)
dev[0].select_network(id1)
dev[0].wait_connected()
res = dev[0].request("BLACKLIST")
if bssid1 in res or bssid2 in res:
raise Exception("Unexpected blacklist entry")
hwsim_utils.test_connectivity(dev[0], hapd1)
dev[0].select_network(id2)
dev[0].wait_connected()
hwsim_utils.test_connectivity(dev[0], hapd2)
res = dev[0].request("BLACKLIST")
if bssid1 in res or bssid2 in res:
raise Exception("Unexpected blacklist entry(2)")
def test_ap_open_disable_enable(dev, apdev):
"""AP with open mode getting disabled and re-enabled"""
hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
dev[0].connect("open", key_mgmt="NONE", scan_freq="2412",
bg_scan_period="0")
for i in range(2):
hapd.request("DISABLE")
dev[0].wait_disconnected()
hapd.request("ENABLE")
dev[0].wait_connected()
hwsim_utils.test_connectivity(dev[0], hapd)
def test_ap_open_sta_enable_disable(dev, apdev):
"""AP with open mode and wpa_supplicant ENABLE/DISABLE_NETWORK"""
hapd = hostapd.add_ap(apdev[0]['ifname'], { "ssid": "open" })
bssid = apdev[0]['bssid']
sta_enable_disable(dev[0], bssid)
wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
wpas.interface_add("wlan5", drv_params="force_connect_cmd=1")
sta_enable_disable(wpas, bssid)
| 41.237113 | 124 | 0.6295 |
13980d70f605aa90e6f0d5a0697ef90a4b646aec | 4,708 | py | Python | task_templates/pipelines/python3_pytorch_regression/model_utils.py | andreakropp/datarobot-user-models | 423ab8c703a545491ad6013a0b7efa3119e2c0fc | [
"Apache-2.0"
] | null | null | null | task_templates/pipelines/python3_pytorch_regression/model_utils.py | andreakropp/datarobot-user-models | 423ab8c703a545491ad6013a0b7efa3119e2c0fc | [
"Apache-2.0"
] | 9 | 2021-11-10T20:16:41.000Z | 2022-03-12T00:59:05.000Z | task_templates/pipelines/python3_pytorch_regression/model_utils.py | andreakropp/datarobot-user-models | 423ab8c703a545491ad6013a0b7efa3119e2c0fc | [
"Apache-2.0"
] | 1 | 2021-06-17T22:05:33.000Z | 2021-06-17T22:05:33.000Z |
#!/usr/bin/env python
# coding: utf-8
# pylint: disable-all
from __future__ import absolute_import
from sklearn.preprocessing import LabelEncoder
from pathlib import Path
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
def train_epoch(model, opt, criterion, X, y, batch_size=50):
model.train()
losses = []
for beg_i in range(0, X.size(0), batch_size):
x_batch = X[beg_i : beg_i + batch_size, :]
# y_hat will be (batch_size, 1) dim, so coerce target to look the same
y_batch = y[beg_i : beg_i + batch_size].reshape(-1, 1)
x_batch = Variable(x_batch)
y_batch = Variable(y_batch)
opt.zero_grad()
# (1) Forward
y_hat = model(x_batch)
# (2) Compute diff
loss = criterion(y_hat, y_batch)
# (3) Compute gradients
loss.backward()
# (4) update weights
opt.step()
losses.append(loss.data.numpy())
return losses
def build_classifier(X, num_labels):
class_model = BinModel(X.shape[1]) if num_labels == 2 else MultiModel(X.shape[1], num_labels)
class_opt = optim.Adam(class_model.parameters(), lr=0.001)
class_criterion = nn.BCELoss() if num_labels == 2 else nn.CrossEntropyLoss()
return class_model, class_opt, class_criterion
def build_regressor(X):
reg_model = RegModel(X.shape[1])
reg_opt = optim.Adam(reg_model.parameters(), lr=0.001)
reg_criterion = nn.MSELoss()
return reg_model, reg_opt, reg_criterion
def train_classifier(X, y, class_model, class_opt, class_criterion, n_epochs=5):
target_encoder = LabelEncoder()
target_encoder.fit(y)
transformed_y = target_encoder.transform(y)
bin_t_X = torch.from_numpy(X.values).type(torch.FloatTensor)
bin_t_y = torch.from_numpy(transformed_y).type(class_model.expected_target_type)
for e in range(n_epochs):
train_epoch(class_model, class_opt, class_criterion, bin_t_X, bin_t_y)
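# Example (editor's sketch, not part of the original file): exercising train_epoch with a
# stand-in regression model, since the RegModel/BinModel/MultiModel classes referenced
# above are not included in this excerpt.
if __name__ == "__main__":
    class _TinyReg(nn.Module):
        def __init__(self, n_features):
            super(_TinyReg, self).__init__()
            self.fc = nn.Linear(n_features, 1)

        def forward(self, x):
            return self.fc(x)

    X_demo = torch.randn(64, 5)
    y_demo = torch.randn(64)
    model = _TinyReg(5)
    opt = optim.Adam(model.parameters(), lr=0.001)
    losses = train_epoch(model, opt, nn.MSELoss(), X_demo, y_demo, batch_size=16)
    print("mean batch loss:", sum(float(l) for l in losses) / len(losses))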
| 31.810811 | 112 | 0.656542 |
13984baeb601966ec125e0ffbdce4b6e8815be83 | 9,894 | py | Python | py/surveysim/weather.py | mlandriau/surveysim | e7a323d6c4031b1b8df25e776dbe81188fbe8860 | [
"BSD-3-Clause"
] | null | null | null | py/surveysim/weather.py | mlandriau/surveysim | e7a323d6c4031b1b8df25e776dbe81188fbe8860 | [
"BSD-3-Clause"
] | 55 | 2016-11-14T21:58:11.000Z | 2021-03-16T01:07:31.000Z | py/surveysim/weather.py | mlandriau/surveysim | e7a323d6c4031b1b8df25e776dbe81188fbe8860 | [
"BSD-3-Clause"
] | 4 | 2016-11-19T00:17:02.000Z | 2021-02-24T14:38:46.000Z |
"""Simulate stochastic observing weather conditions.
The simulated conditions include seeing, transparency and the dome-open fraction.
"""
from __future__ import print_function, division, absolute_import
from datetime import datetime
import numpy as np
import astropy.time
import astropy.table
import astropy.units as u
import desiutil.log
import desimodel.weather
import desisurvey.config
import desisurvey.ephem
import desisurvey.utils
| 43.973333 | 92 | 0.63392 |
1398a0a81ef6551f1edec803a59f1dbf8ef55e95 | 8,075 | py | Python | lib/csv_writer.py | takeratta/ga-dev-tools | 19dcf7c750af8214e5a306fc0f8e2b28bef7bb40 | [
"Apache-2.0"
] | 2 | 2020-07-02T14:29:44.000Z | 2021-12-02T09:31:36.000Z | lib/csv_writer.py | jeffreychung/ga-dev-tools | 19dcf7c750af8214e5a306fc0f8e2b28bef7bb40 | [
"Apache-2.0"
] | 3 | 2022-02-19T14:08:17.000Z | 2022-03-03T22:32:16.000Z | lib/csv_writer.py | colorstheforce/ga-dev-tools | 46dd9652f9a7d9f8255b6d401985fdcfb8b61b25 | [
"Apache-2.0"
] | 1 | 2021-01-02T17:04:16.000Z | 2021-01-02T17:04:16.000Z |
# coding=utf-8
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility to convert a Data Export API reponse into TSV.
This provides utitlites to both print TSV files to the standard output
as well as directly to a file. This logic handles all the utf-8 conversion.
GetTsvFilePrinter: Returns an instantiated object to output to files.
GetTsvScreenPrinter: Returns an instantiated object to output to the screen.
UnicodeWriter(): Utf-8 encodes output.
ExportPrinter(): Converts the Data Export API response into tabular data.
"""
__author__ = 'api.nickm@ (Nick Mihailovski)'
import codecs
import csv
import StringIO
import sys
import types
# A list of special characters that need to be escaped.
SPECIAL_CHARS = ('+', '-', '/', '*', '=')
# TODO(nm): Test leading numbers.
def GetTsvFilePrinter(file_name):
"""Returns a ExportPrinter object to output to file_name.
Args:
file_name: string The name of the file to output to.
Returns:
The newly created ExportPrinter object.
"""
my_handle = open(file_name, 'w')  # open for writing; the original call omitted the mode
writer = UnicodeWriter(my_handle, dialect='excel-tab')
return ExportPrinter(writer)
def GetTsvScreenPrinter():
"""Returns a ExportPrinter object to output to std.stdout."""
writer = UnicodeWriter(sys.stdout, dialect='excel-tab')
return ExportPrinter(writer)
def GetTsvStringPrinter(f):
"""Returns a ExportPrinter object to output to std.stdout."""
writer = UnicodeWriter(f, dialect='excel-tab')
return ExportPrinter(writer)
# Wrapper to output to utf-8. Taken mostly / directly from Python docs:
# http://docs.python.org/library/csv.html
def ExcelEscape(input_value):
"""Escapes the first character of a string if it is special in Excel.
Args:
input_value: string The value to escape.
Returns:
A string that has the first character escaped if it is special.
"""
if input_value and input_value[0] in SPECIAL_CHARS:
return "'" + input_value
return input_value
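# Example (editor's addition, not part of the original file): values starting with a
# formula-like character are prefixed with an apostrophe so spreadsheets treat them as text.
# ExcelEscape('=SUM(A1:A2)')  -> "'=SUM(A1:A2)"
# ExcelEscape('plain value')  -> 'plain value'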
| 30.130597 | 78 | 0.687059 |
13991543937ea97e225d2fffa3ed5c4c26a13a38 | 2,924 | py | Python | resdata/TensorFlow/RNN_Prediction/stockPrediction202005201318.py | yuwenxianglong/zhxsh.github.io | 427d14b787e55df26e03a069288815b14ab6b534 | [
"MIT"
] | null | null | null | resdata/TensorFlow/RNN_Prediction/stockPrediction202005201318.py | yuwenxianglong/zhxsh.github.io | 427d14b787e55df26e03a069288815b14ab6b534 | [
"MIT"
] | 1 | 2021-03-30T04:35:57.000Z | 2021-03-30T04:35:57.000Z | resdata/TensorFlow/RNN_Prediction/stockPrediction202005201318.py | yuwenxianglong/yuwenxianglong.github.io | 196e32d2775ef3a3863603cb5c30023450a1944c | [
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@Project : RNN_Prediction
@Author : Xu-Shan Zhao
@Filename: stockPrediction202005201318.py
@IDE : PyCharm
@Time1 : 2020-05-20 13:18:46
@Time2 : 2020/5/20 13:18
@Month1 : 5
@Month2 :
"""
import tushare as ts
import tensorflow as tf
import pandas as pd
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
stock_catl = ts.get_hist_data('300750')
stock_catl = stock_catl.sort_index(ascending=True)
stock_catl = (stock_catl - stock_catl.mean()) / \
(stock_catl.max() - stock_catl.min())
# train, val = train_test_split(stock_catl, test_size=0.5)
# train = train.sort_index(ascending=True)
# val = val.sort_index(ascending=True)
train = stock_catl.iloc[:-60, :]
val = stock_catl.iloc[-60:, :]
window_size = 30
column = 'high'
epoches = 300
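# The original zip_ds helper is not included in this excerpt. Below is a minimal sketch
# (editor's assumption) of what it has to produce: batches pairing a
# [window_size, n_features] window with the row that follows it, matching the
# model definition and the prediction loops further down.
def zip_ds(df):
    data = tf.constant(df.values, dtype=tf.float32)
    ds = tf.data.Dataset.range(len(df) - window_size)
    ds = ds.map(lambda i: (data[i:i + window_size], data[i + window_size]))
    return ds.batch(32).repeat()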
ds_train = zip_ds(train)
ds_val = zip_ds(val)
model = tf.keras.Sequential(
[
tf.keras.layers.LSTM(128, return_sequences=True, activation='relu'),
tf.keras.layers.LSTM(128, activation='relu'),
tf.keras.layers.Dense(13)
]
)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)
model.compile(optimizer=optimizer, loss='mse')
history = model.fit(
ds_train, epochs=epoches,
steps_per_epoch=5,
validation_data=ds_val,
validation_steps=1
)
model.save('stockLSTM')
# Plot loss function
plt.figure(figsize=(19, 9))
ax = plt.gca()
plt.plot(range(len(history.history['loss'])), history.history['loss'])
plt.plot(range(len(history.history['val_loss'])), history.history['val_loss'])
ax.set_yscale('log')
plt.show()
# Compare fitting and real values.
dff = pd.DataFrame()
for i in range(len(stock_catl) - window_size):
fits = model.predict(tf.constant(tf.expand_dims(stock_catl.values[i:i + window_size, :], axis=0)))
dffits = pd.DataFrame(fits, columns=stock_catl.columns)
dff = dff.append(dffits)
dff.index = stock_catl.index[window_size:]
plt.figure(figsize=(19, 9))
dff[column].plot()
stock_catl.iloc[window_size:, :][column].plot(style='-o')
plt.show()
# To predict future 100 business days.
dfp = stock_catl.copy()
for i in range(100):
pres = model.predict(tf.constant(tf.expand_dims(dfp.values[-1 * window_size:], axis=0)))
dfpres = pd.DataFrame(pres, columns=stock_catl.columns)
dfp = dfp.append(dfpres, ignore_index=True)
dfp[column].plot()
plt.show()
| 28.666667 | 102 | 0.713748 |
13996a1a6227b5d37ae0ca02175dbff81af29e2d | 29,063 | py | Python | src/mushme.py | MuShMe/MuShMe | dbc9b940c827039016d7917d535882b47d7d8e5b | [
"Unlicense",
"MIT"
] | 1 | 2015-07-10T06:14:29.000Z | 2015-07-10T06:14:29.000Z | src/mushme.py | MuShMe/MuShMe | dbc9b940c827039016d7917d535882b47d7d8e5b | [
"Unlicense",
"MIT"
] | 2 | 2016-01-10T04:27:12.000Z | 2016-01-10T10:47:57.000Z | src/mushme.py | MuShMe/MuShMe | dbc9b940c827039016d7917d535882b47d7d8e5b | [
"Unlicense",
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from src import app
import os
import shutil
from flask import Flask, render_template, session, request, flash, url_for, redirect
from Forms import ContactForm, LoginForm, editForm, ReportForm, CommentForm, searchForm, AddPlaylist
from flask.ext.mail import Message, Mail
from werkzeug import secure_filename
from werkzeug import SharedDataMiddleware
from api import API
from songs import SONG
from playlist import playlist
from admin import admin
from artist import artist
import pymysql
import hashlib
from flask import g
mail = Mail()
mail.init_app(app)
#For the collector script.
app.register_blueprint(API);
#For the songs
app.register_blueprint(SONG);
#For the playlist
app.register_blueprint(playlist);
#for the admin pages
app.register_blueprint(admin);
#for the artist pages
app.register_blueprint(artist);
UPLOAD_FOLDER = "img/ProfilePic/"
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
app.config['UPLOAD_FOLDER'] = 'src/static/' + UPLOAD_FOLDER
#For database connections.
def flash_errors(form):
for field, errors in form.errors.items():
for error in errors:
flash(u"Error in the %s field - %s" % (
getattr(form, field).label.text,
error
))
app.add_url_rule('/user/uploads/<filename>', 'uploaded_file',build_only=True)
app.wsgi_app = SharedDataMiddleware(app.wsgi_app, {'/user/uploads': 'src/static' + app.config['UPLOAD_FOLDER'] })
def requestvalidate(userfrom,userto):
check = g.database.execute(""" SELECT Status from requests where Request_to="%s" and Request_from="%s" """ % (userfrom,userto))
if check and g.database.fetchone()[0]=='-1' and userfrom!=userto:
return False
else:
return True
#All your profile are belong to us.
#To handle 404 not found errors
if not app.debug:
import logging
from logging.handlers import SMTPHandler
mail_handler = SMTPHandler('127.0.0.1', '[email protected]', app.config['DEFAULT_MAIL_SENDER'], 'YourApplication Failed')
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
from logging import FileHandler
file_handler = FileHandler('log.txt')
file_handler.setLevel(logging.WARNING)
app.logger.addHandler(file_handler)
from logging import Formatter
mail_handler.setFormatter(Formatter('''
Message type: %(levelname)s
Location: %(pathname)s:%(lineno)d
Module: %(module)s
Function: %(funcName)s
Time: %(asctime)s
Message:
%(message)s
'''))
if __name__ == """__main__""":
# To allow aptana to receive errors, set use_debugger=False
app = create_app(config="""config.yaml""")
if app.debug: use_debugger = True
try:
# Disable Flask's debugger if external debugger is requested
use_debugger = not(app.config.get('DEBUG_WITH_APTANA'))
except:
pass
app.run(use_debugger=use_debugger,
use_reloader=use_debugger, threaded=True, port=8080)
| 42.181422 | 251 | 0.600282 |
13998d176731562bde5bd78d5d04ea6a48f3fc9c | 19,221 | py | Python | language/labs/drkit/evaluate.py | Xtuden-com/language | 70c0328968d5ffa1201c6fdecde45bbc4fec19fc | [
"Apache-2.0"
] | 1,199 | 2018-10-16T01:30:18.000Z | 2022-03-31T21:05:24.000Z | language/labs/drkit/evaluate.py | Xtuden-com/language | 70c0328968d5ffa1201c6fdecde45bbc4fec19fc | [
"Apache-2.0"
] | 116 | 2018-10-18T03:31:46.000Z | 2022-03-24T13:40:50.000Z | language/labs/drkit/evaluate.py | Xtuden-com/language | 70c0328968d5ffa1201c6fdecde45bbc4fec19fc | [
"Apache-2.0"
] | 303 | 2018-10-22T12:35:12.000Z | 2022-03-27T17:38:17.000Z |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Evaluate lazy slot filling results."""
import codecs
import collections
import gzip
import json
import random
import re
import string
import unicodedata
from absl import app
from absl import flags
from bert import tokenization
from language.labs.drkit import input_fns
import numpy as np
import tensorflow.compat.v1 as tf
PUNCTUATION = frozenset(string.punctuation)
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string("ground_truth_file", None,
"File with ground truth answers.")
flags.DEFINE_string("predicted_answers_file", None,
"File with predicted answers from model.")
flags.DEFINE_string("relation_counts_file", None,
"JSON file with relation counts.")
def wikimovie_eval_fn(dataset, results, name_map, output_prediction_file,
**kwargs):
"""Compute evaluation metrics for OneHopDataset or TwoHopDataset.
Args:
dataset: An object of type OneHopDataset.
results: A list of result dicts from running estimator.predict.
name_map: A mapping from prediction indices to text strings.
output_prediction_file: File to store predictions to.
**kwargs: Variable keyword arguments.
Returns:
metrics: A dict mapping metric names to values.
"""
del kwargs
# Collect ground truth answers.
gt_answer = {ex.qas_id: ex.answer_entity for ex in dataset.examples}
gt_ques = {ex.qas_id: ex.question_text for ex in dataset.examples}
gt_entity = {ex.qas_id: ex.subject_entity[0] for ex in dataset.examples}
inf_chain = {ex.qas_id: ex.inference_chain for ex in dataset.examples}
# Compute basic metrics.
num_correct = 0.
all_predictions = {}
chain2stats = {ch: [0., 0.] for ch in inf_chain.values()}
incorrect_results, correct_results = [], []
for result in results:
qas_id = result["qas_ids"]
prediction = result["predictions"]
if prediction in gt_answer[qas_id]:
num_correct += 1
chain2stats[inf_chain[qas_id]][0] += 1
correct_results.append({
"qas_id": result["qas_ids"],
"question": gt_ques[qas_id],
"answers": gt_answer[qas_id],
"subject": gt_entity[qas_id],
"inf-chain": inf_chain[qas_id],
"predictions": result["predictions"],
})
for hop in range(3):
if "sparse_%d" % hop in result:
correct_results[-1].update({
"sparse_%d" % hop: result["sparse_%d" % hop],
"dense_%d" % hop: result["dense_%d" % hop],
"mention_%d" % hop: result["mention_%d" % hop],
"entity_%d" % hop: result["entity_%d" % hop],
"sparse_scores_%d" % hop: result["sparse_scores_%d" % hop],
"dense_scores_%d" % hop: result["dense_scores_%d" % hop],
"mention_scores_%d" % hop: result["mention_scores_%d" % hop],
"entity_scores_%d" % hop: result["entity_scores_%d" % hop],
})
else:
incorrect_results.append({
"qas_id": result["qas_ids"],
"question": gt_ques[qas_id],
"answers": gt_answer[qas_id],
"subject": gt_entity[qas_id],
"inf-chain": inf_chain[qas_id],
"predictions": result["predictions"],
})
for hop in range(3):
if "sparse_%d" % hop in result:
incorrect_results[-1].update({
"sparse_%d" % hop: result["sparse_%d" % hop],
"dense_%d" % hop: result["dense_%d" % hop],
"mention_%d" % hop: result["mention_%d" % hop],
"entity_%d" % hop: result["entity_%d" % hop],
"sparse_scores_%d" % hop: result["sparse_scores_%d" % hop],
"dense_scores_%d" % hop: result["dense_scores_%d" % hop],
"mention_scores_%d" % hop: result["mention_scores_%d" % hop],
"entity_scores_%d" % hop: result["entity_scores_%d" % hop],
})
chain2stats[inf_chain[qas_id]][1] += 1
all_predictions[qas_id] = name_map[str(prediction)]
accuracy = num_correct / len(all_predictions)
json.dump(all_predictions, tf.gfile.Open(output_prediction_file, "w"))
json.dump(
random.sample(incorrect_results, 100),
tf.gfile.Open(output_prediction_file + ".incorrect", "w"),
cls=NumpyEncoder)
json.dump(
random.sample(correct_results, 100),
tf.gfile.Open(output_prediction_file + ".correct", "w"),
cls=NumpyEncoder)
# Return metrics.
metrics = {
"accuracy": accuracy,
}
for ch, stats in chain2stats.items():
metrics["inference-chains-acc/" + ch] = stats[0] / stats[1]
return metrics
def multihop_eval_fn(dataset,
results,
name_map,
output_prediction_file,
supervision="mention",
**kwargs):
"""Compute evaluation metrics for OneHopDataset or TwoHopDataset.
Args:
dataset: An object of type OneHopDataset.
results: A list of result dicts from running estimator.predict.
name_map: A mapping from prediction indices to text strings.
output_prediction_file: File to store predictions to.
supervision: Type of supervision used in the model.
**kwargs: Variable keyword arguments.
Returns:
metrics: A dict mapping metric names to values.
"""
del kwargs
# Collect ground truth answers.
gt_mentions = {ex.qas_id: ex.answer_mention[0] for ex in dataset.examples}
if supervision == "mention":
gt_answer = gt_mentions
else:
gt_answer = {ex.qas_id: ex.answer_entity[0] for ex in dataset.examples}
# Compute basic metrics.
num_correct = 0.
all_predictions = {}
for result in results:
qas_id = result["qas_ids"]
prediction = result["predictions"]
if prediction == gt_answer[qas_id]:
num_correct += 1
all_predictions[qas_id] = name_map[str(prediction)]
accuracy = num_correct / len(all_predictions)
# Compute advanced metrics.
json.dump(all_predictions, tf.gfile.Open(output_prediction_file, "w"))
micro, macro, _, _ = compute_scores(dataset.gt_file, output_prediction_file)
# Return metrics.
metrics = {
"accuracy": accuracy,
"micro-p": micro[0],
"micro-r": micro[1],
"micro-f": micro[2],
"macro-p": macro[0],
"macro-r": macro[1],
"macro-f": macro[2],
}
return metrics
def hotpot_eval_fn(dataset, results, name_map, output_prediction_file,
**kwargs):
"""Compute evaluation metrics for HotpotQADataset.
Args:
dataset: An object of type HotpotQADataset.
results: A list of result dicts from running estimator.predict.
name_map: A mapping from prediction indices to text strings.
output_prediction_file: File to store predictions to.
**kwargs: Variable keyword arguments.
Returns:
metrics: A dict mapping metric names to values.
"""
del kwargs
# Collect ground truth answers.
gt_answer = {ex.qas_id: ex.answer_entity for ex in dataset.examples}
gt_types = {ex.qas_id: ex.inference_chain for ex in dataset.examples}
# Compute basic metrics.
num_correct = {2: 0., 5: 0., 10: 0., 20: 0.}
aps = []
no_answer = 0.
all_predictions = {}
bridge_acc, comp_acc = 0., 0.
bridge_tot, comp_tot = 0, 0
single_acc = 0.
layer_weights = np.zeros_like(results[0]["layer_probs"])
num_layer_entities = {i: 0. for i in range(layer_weights.shape[0])}
num_new_entities = {i: 0. for i in range(layer_weights.shape[0])}
for result in results:
qas_id = result["qas_ids"].decode("utf-8")
preds = result["top_idx"]
scores = result["top_vals"]
ans = gt_answer[qas_id]
my_type = gt_types[qas_id]
if my_type == "bridge":
bridge_tot += 1
else:
comp_tot += 1
ranks = np.where(np.in1d(preds, ans))[0]
ranks = np.sort(ranks)
ap = 0.
cnt = 0.
if any(rr < 10 for rr in ranks):
single_acc += 1
if ranks.shape[0] == 0:
no_answer += 1
for rr in ranks:
cnt += 1
ap += cnt / (rr + 1)
if ans:
aps.append(ap / len(ans))
else:
aps.append(0.)
found = False
for key in [2, 5, 10, 20]:
if found or np.in1d(ans, preds[:key]).all():
num_correct[key] += 1
found = True
if key == 10:
if my_type == "bridge":
bridge_acc += 1
else:
comp_acc += 1
# Non-accuracy stats
layer_weights += result["layer_probs"]
layer_entities = {i: set() for i in range(layer_weights.shape[0])}
all_predictions[qas_id] = {}
for i in range(layer_weights.shape[0]):
layer_entities[i] = set(
[ee for ee in result["layer_%d_ent" % i] if ee != -1])
num_layer_entities[i] += len(layer_entities[i])
num_new_entities[i] += len(layer_entities[i] - layer_entities[0])
# all_predictions[qas_id]["layer_%d" % i] = [
# name_map[str(ee)] for ee in layer_entities[i]]
all_predictions[qas_id]["predictions"] = [
(name_map[str(pred)], str(scores[i])) for i, pred in enumerate(preds)
]
tf.logging.info("Evaluated %d items", len(all_predictions))
accuracy = {
key: (num_correct[key] / len(all_predictions)) for key in num_correct
}
# Compute advanced metrics.
json.dump(all_predictions, tf.gfile.Open(output_prediction_file, "w"))
# Return metrics.
metrics = {"eval/@%d" % key: accuracy[key] for key in accuracy}
metrics["accuracy"] = accuracy[10]
metrics["eval/map"] = sum(aps) / len(all_predictions)
metrics["eval/bridge_accuracy"] = bridge_acc / bridge_tot
metrics["eval/comparison_accuracy"] = comp_acc / comp_tot
metrics["analysis/single_accuracy"] = single_acc / len(all_predictions)
metrics["analysis/no_answers"] = no_answer / len(all_predictions)
for i in range(layer_weights.shape[0]):
metrics["analysis/layer_weight_%d" %
i] = layer_weights[i] / len(all_predictions)
metrics["analysis/num_entities_%d" %
i] = num_layer_entities[i] / len(all_predictions)
metrics["analysis/num_new_entities_%d" %
i] = num_new_entities[i] / len(all_predictions)
return metrics
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
"""Compute F1 score."""
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = collections.Counter(prediction_tokens) & collections.Counter(
ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def exact_match_score(prediction, ground_truth):
"""Compute EM score."""
return normalize_answer(prediction) == normalize_answer(ground_truth)
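# Worked example for the two metrics above, assuming the standard SQuAD-style
# normalization helpers (lowercase, strip punctuation, drop articles, collapse
# whitespace); the strings are made up:
#   prediction = "The Cat sat."  vs  ground_truth = "cat sat down"
#   normalize_answer(...)  -> "cat sat" and "cat sat down"
#   f1_score(...)          -> precision 2/2, recall 2/3, F1 = 0.8
#   exact_match_score(...) -> False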
def read_answers(gold_file):
"""Read ground truth answers."""
answers = {}
f = tf.gfile.Open(gold_file)
if gold_file.endswith(".gz"):
f = gzip.GzipFile(fileobj=f)
for i, line in enumerate(f):
example = json.loads(line)
if i == 0 and "header" in example:
continue
for qa in example["qas"]:
answers[qa["qid"]] = qa["answers"]
f.close()
return answers
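# Expected gold-file layout (one JSON object per line; values illustrative):
#   {"header": {...}}                                      <- optional first line
#   {"qas": [{"qid": "q-001", "answers": ["answer a", "answer b"]}, ...]}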
def evaluate(answers, predictions, skip_no_answer=False):
"""Compute F1 and EM scores."""
f1 = exact_match = total = 0
for qid, ground_truths in answers.items():
if qid not in predictions:
if not skip_no_answer:
message = "Unanswered question %s will receive score 0." % qid
print(message)
total += 1
continue
total += 1
prediction = predictions[qid]
exact_match += metric_max_over_ground_truths(exact_match_score, prediction,
ground_truths)
f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {"exact_match": exact_match, "f1": f1}
def compute_scores(ground_truth_file, predicted_answers_file):
"""Read predictions and ground truth and return P, R, F."""
telemetry, incorrect = read_results(ground_truth_file, predicted_answers_file)
micro = aprf(telemetry)
relationwise = aprf_relationwise(telemetry)
macro = sum([val[0] for _, val in relationwise.items()])
macro = macro / len(relationwise)
return micro, macro, relationwise, incorrect
def read_results(ground_truth_file, predicted_answers_file):
"""Read results and ground truth and return data structure with stats."""
with codecs.getreader("utf-8")(tf.gfile.GFile(ground_truth_file,
"r")) as read:
data_ = {}
for line in read:
item = json.loads(line.strip())
if isinstance(item["relation"], dict):
relation = item["relation"]["wikidata_id"]
elif isinstance(item["relation"], list):
relation = (
item["relation"][0]["wikidata_id"] + "_" +
item["relation"][1]["wikidata_id"])
data_[item["id"]] = [relation, item["subject"]["wikidata_id"]]
if "is_impossible" in item and item["is_impossible"]:
continue
if item["object"] is None:
continue
if isinstance(item["object"]["mention"], dict):
data_[item["id"]] += [item["object"]["mention"]["text"]]
if "name" in item["object"]:
data_[item["id"]] += [item["object"]["name"]]
if "aliases" in item["object"]:
data_[item["id"]] += item["object"]["aliases"].keys()
with codecs.getreader("utf-8")(tf.gfile.GFile(predicted_answers_file,
"r")) as fin:
predictions = json.load(fin)
telemetry, incorrect = [], []
n = 0
for key in data_:
if key not in predictions:
continue
g = data_[key][2:]
a = predictions[key]
m = data_[key][:2]
stats = score(g, a)
telemetry.append([m[0], m[1], g, a, stats])
if stats[0] == 0. and stats[3] > 0.:
incorrect.append(key)
n += 1
return telemetry, incorrect
def aprf_relationwise(g):
"""Returns precision, recall and F score for each relation."""
rel_to_stats = collections.defaultdict(list)
for item in g:
rel_to_stats[item[0]].append(item)
rel_to_scores = {}
for rel, stats in rel_to_stats.items():
rel_to_scores[rel] = [aprf(stats), len(stats)]
return rel_to_scores
def aprf(g):
"""Returns precision, recall and F of the given statistics."""
tp, _, sys_pos, real_pos = sum([x[-1] for x in g])
if tp == 0:
p = r = f = 0.0
else:
p = tp / float(sys_pos) if sys_pos > 0 else 0.
r = tp / float(real_pos) if real_pos > 0 else 0.
f = 2 * p * r / (p + r)
return np.asarray([p, r, f])
def score(gold, answer):
"""Compares answer to ground truth to return TP / FP stats."""
if gold:
gold = set([simplify(g) for g in gold])
answer = simplify(answer)
result = np.zeros(4)
if gold:
result[3] += 1
if answer in gold:
result[0] += 1
else:
if not answer:
result[1] += 1
if answer:
result[2] += 1
return result
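# Worked example for score()/aprf(), with made-up entities: for
# gold = ["Barack Obama"] and answer = "the Barack Obama.", simplify() maps both
# sides to "barackobama", so score() returns [1, 0, 1, 1]
# (tp, correct-empty, answered, has-gold) and aprf() on that single item
# gives P = R = F = 1.0.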
def strip_accents_and_punct(text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
if char in PUNCTUATION:
continue
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def simplify(answer):
"""Pre-process answer string."""
toks = []
articles = {"the", "a", "an", "and", ""}
for t in answer.strip().lower().split():
tok = strip_accents_and_punct(t)
if tok not in articles:
toks.append(tok)
return "".join(toks)
def rare_relation_scores(relationwise, relation2counts):
"""Print statistics of rare relations for different thresholds."""
for thresh in [5, 100, 500, 1000]:
freq_stats, freq_total = np.array([0., 0., 0.]), 0
rare_stats, rare_total = np.array([0., 0., 0.]), 0
for relation, (stats, _) in relationwise.items():
if relation2counts.get(relation, 0) < thresh:
rare_stats += stats
rare_total += 1
else:
freq_stats += stats
freq_total += 1
rare_stats /= rare_total
freq_stats /= freq_total
print(
"Threshold =", thresh, "rare", rare_total,
"Micro-P %.3f Micro-R %.3f Micro-F %.3f" %
(rare_stats[0], rare_stats[1], rare_stats[2]), "freq", freq_total,
"Micro-P %.3f Micro-R %.3f Micro-F %.3f" %
(freq_stats[0], freq_stats[1], freq_stats[2]))
if __name__ == "__main__":
app.run(main)
| 33.196891 | 80 | 0.643827 |
139af14f3890b6a5fdebd9bc833f815258ac26c3 | 1,433 | py | Python | tests/adv/test_pop_sfrd.py | jlashner/ares | 6df2b676ded6bd59082a531641cb1dadd475c8a8 | [
"MIT"
] | 10 | 2020-03-26T01:08:10.000Z | 2021-12-04T13:02:10.000Z | tests/adv/test_pop_sfrd.py | jlashner/ares | 6df2b676ded6bd59082a531641cb1dadd475c8a8 | [
"MIT"
] | 25 | 2020-06-08T14:52:28.000Z | 2022-03-08T02:30:54.000Z | tests/adv/test_pop_sfrd.py | jlashner/ares | 6df2b676ded6bd59082a531641cb1dadd475c8a8 | [
"MIT"
] | 8 | 2020-03-24T14:11:25.000Z | 2021-11-06T06:32:59.000Z | """
test_pop_models.py
Author: Jordan Mirocha
Affiliation: UCLA
Created on: Fri Jul 15 15:23:11 PDT 2016
Description:
"""
import ares
import matplotlib.pyplot as pl
PB = ares.util.ParameterBundle
if __name__ == '__main__':
test()
| 25.589286 | 75 | 0.669923 |
139b3faa5c126c4a50efcad5c0824965049d5697 | 258 | py | Python | venv/lib/python3.7/site-packages/leancloud/engine/utils.py | corgiclub/CorgiBot_telegram | a63d91a74ee497b9a405e93bd3b303367ef95268 | [
"MIT"
] | null | null | null | venv/lib/python3.7/site-packages/leancloud/engine/utils.py | corgiclub/CorgiBot_telegram | a63d91a74ee497b9a405e93bd3b303367ef95268 | [
"MIT"
] | null | null | null | venv/lib/python3.7/site-packages/leancloud/engine/utils.py | corgiclub/CorgiBot_telegram | a63d91a74ee497b9a405e93bd3b303367ef95268 | [
"MIT"
] | null | null | null | # coding: utf-8
import time
import hashlib
import leancloud
from leancloud._compat import to_bytes
__author__ = 'asaka <[email protected]>'
| 18.428571 | 77 | 0.748062 |
139b6ad51a7b83cb108f4b1bb43a2ce22b27cc6e | 2,377 | py | Python | AirplaneLQR/chap4LQR/mavsim_chap4.py | eyler94/ee674AirplaneSim | 3ba2c6e685c2688a7f372475a7cd1f55f583d10e | [
"MIT"
] | 1 | 2020-06-07T00:14:42.000Z | 2020-06-07T00:14:42.000Z | AirplaneLQR/chap4LQR/mavsim_chap4.py | eyler94/ee674AirplaneSim | 3ba2c6e685c2688a7f372475a7cd1f55f583d10e | [
"MIT"
] | null | null | null | AirplaneLQR/chap4LQR/mavsim_chap4.py | eyler94/ee674AirplaneSim | 3ba2c6e685c2688a7f372475a7cd1f55f583d10e | [
"MIT"
] | 1 | 2019-06-24T22:10:48.000Z | 2019-06-24T22:10:48.000Z | """
mavsimPy
- Chapter 4 assignment for Beard & McLain, PUP, 2012
- Update history:
12/27/2018 - RWB
1/17/2019 - RWB
"""
import sys
sys.path.append('..')
import numpy as np
import parameters.simulation_parameters as SIM
from chap2.mav_viewer import mav_viewer
# from chap2.video_writer import video_writer
from chap3.data_viewer import data_viewer
from chap4.mav_dynamics import mav_dynamics
from chap4.wind_simulation import wind_simulation
from time import sleep
# initialize the visualization
VIDEO = False # True==write video, False==don't write video
mav_view = mav_viewer() # initialize the mav viewer
data_view = data_viewer() # initialize view of data plots
if VIDEO == True:
video = video_writer(video_name="chap4_video.avi",
bounding_box=(0, 0, 1000, 1000),
output_rate=SIM.ts_video)
# initialize elements of the architecture
wind = wind_simulation(SIM.ts_simulation)
mav = mav_dynamics(SIM.ts_simulation)
# initialize the simulation time
sim_time = SIM.start_time
# main simulation loop
# sleep(5)
print("Press Command-Q to exit...")
while sim_time < SIM.end_time:
#-------set control surfaces-------------
if(sim_time<25):
delta_e = -0.1
delta_t = 1.0 # 0.5
delta_a = 0.0 # 0.0
delta_r = 0.0 # 0.005
delta = np.array([[delta_e, delta_t, delta_a, delta_r]]).T # transpose to make it a column vector
else:
delta_e = -0.3
delta_t = 1.0#0.5
delta_a = 0.01#0.0
delta_r = 0.00025#0.005
delta = np.array([[delta_e, delta_t, delta_a, delta_r]]).T # transpose to make it a column vector
#-------physical system-------------
current_wind = wind.update() # get the new wind vector
# print("current wind: ", current_wind)
mav.update_state(delta, current_wind) # propagate the MAV dynamics
#-------update viewer-------------
mav_view.update(mav.msg_true_state) # plot body of MAV
data_view.update(mav.msg_true_state, # true states
mav.msg_true_state, # estimated states
mav.msg_true_state, # commanded states
SIM.ts_simulation)
if VIDEO == True:
video.update(sim_time)
#-------increment time-------------
sim_time += SIM.ts_simulation
if VIDEO == True:
video.close()
| 30.87013 | 106 | 0.63862 |
139b92054f917712ecbfacdc663b9fc7eea6103f | 6,010 | py | Python | core/self6dpp/tools/ycbv/ycbv_pbr_so_mlBCE_Double_3_merge_train_real_uw_init_results_with_refined_poses_to_json.py | THU-DA-6D-Pose-Group/self6dpp | c267cfa55e440e212136a5e9940598720fa21d16 | [
"Apache-2.0"
] | 33 | 2021-12-15T07:11:47.000Z | 2022-03-29T08:58:32.000Z | core/self6dpp/tools/ycbv/ycbv_pbr_so_mlBCE_Double_3_merge_train_real_uw_init_results_with_refined_poses_to_json.py | THU-DA-6D-Pose-Group/self6dpp | c267cfa55e440e212136a5e9940598720fa21d16 | [
"Apache-2.0"
] | 3 | 2021-12-15T11:39:54.000Z | 2022-03-29T07:24:23.000Z | core/self6dpp/tools/ycbv/ycbv_pbr_so_mlBCE_Double_3_merge_train_real_uw_init_results_with_refined_poses_to_json.py | THU-DA-6D-Pose-Group/self6dpp | c267cfa55e440e212136a5e9940598720fa21d16 | [
"Apache-2.0"
] | null | null | null | import os.path as osp
import sys
import numpy as np
import mmcv
from tqdm import tqdm
from functools import cmp_to_key
cur_dir = osp.dirname(osp.abspath(__file__))
PROJ_ROOT = osp.normpath(osp.join(cur_dir, "../../../../"))
sys.path.insert(0, PROJ_ROOT)
from lib.pysixd import inout, misc
from lib.utils.bbox_utils import xyxy_to_xywh
from lib.utils.utils import iprint, wprint
id2obj = {
1: "002_master_chef_can", # [1.3360, -0.5000, 3.5105]
2: "003_cracker_box", # [0.5575, 1.7005, 4.8050]
3: "004_sugar_box", # [-0.9520, 1.4670, 4.3645]
4: "005_tomato_soup_can", # [-0.0240, -1.5270, 8.4035]
5: "006_mustard_bottle", # [1.2995, 2.4870, -11.8290]
6: "007_tuna_fish_can", # [-0.1565, 0.1150, 4.2625]
7: "008_pudding_box", # [1.1645, -4.2015, 3.1190]
8: "009_gelatin_box", # [1.4460, -0.5915, 3.6085]
9: "010_potted_meat_can", # [2.4195, 0.3075, 8.0715]
10: "011_banana", # [-18.6730, 12.1915, -1.4635]
11: "019_pitcher_base", # [5.3370, 5.8855, 25.6115]
12: "021_bleach_cleanser", # [4.9290, -2.4800, -13.2920]
13: "024_bowl", # [-0.2270, 0.7950, -2.9675]
14: "025_mug", # [-8.4675, -0.6995, -1.6145]
15: "035_power_drill", # [9.0710, 20.9360, -2.1190]
16: "036_wood_block", # [1.4265, -2.5305, 17.1890]
17: "037_scissors", # [7.0535, -28.1320, 0.0420]
18: "040_large_marker", # [0.0460, -2.1040, 0.3500]
19: "051_large_clamp", # [10.5180, -1.9640, -0.4745]
20: "052_extra_large_clamp", # [-0.3950, -10.4130, 0.1620]
21: "061_foam_brick", # [-0.0805, 0.0805, -8.2435]
}
obj_num = len(id2obj)
obj2id = {_name: _id for _id, _name in id2obj.items()}
if __name__ == "__main__":
new_res_path = osp.join(
PROJ_ROOT,
"datasets/BOP_DATASETS/ycbv/test/init_poses/",
"resnest50d_online_AugCosyAAEGray_mlBCE_DoubleMask_ycbv_pbr_100e_so_GdrnPbrPose_withYolov4PbrBbox_wDeepimPbrPose_ycbv_train_real_uw.json",
)
if osp.exists(new_res_path):
wprint("{} already exists! overriding!".format(new_res_path))
res_root = "output/deepim/ycbvPbrSO/FlowNet512_1.5AugCosyAAEGray_AggressiveR_ClipGrad_fxfy1_Dtw01_LogDz_PM10_Flat_ycbvPbr_SO/"
iter_num_test = 4
pkl_paths = [
"01_02MasterChefCan/inference_model_final_wo_optim-2de2b4e3/ycbv_002_master_chef_can_train_real_uw/results.pkl",
"02_03CrackerBox/inference_model_final_wo_optim-41082f8a/ycbv_003_cracker_box_train_real_uw/results.pkl",
"03_04SugarBox/inference_model_final_wo_optim-e09dec3e/ycbv_004_sugar_box_train_real_uw/results.pkl",
"04_05TomatoSoupCan/inference_model_final_wo_optim-5641f5d3/ycbv_005_tomato_soup_can_train_real_uw/results.pkl",
"05_06MustardBottle/inference_model_final_wo_optim-6ce23e94/ycbv_006_mustard_bottle_train_real_uw/results.pkl",
"06_07TunaFishCan/inference_model_final_wo_optim-0a768962/ycbv_007_tuna_fish_can_train_real_uw/results.pkl",
"07_08PuddingBox/inference_model_final_wo_optim-f2f2cf73/ycbv_008_pudding_box_train_real_uw/results.pkl",
"08_09GelatinBox/inference_model_final_wo_optim-a303aa1e/ycbv_009_gelatin_box_train_real_uw/results.pkl",
"09_10PottedMeatCan/inference_model_final_wo_optim-84a56ffd/ycbv_010_potted_meat_can_train_real_uw/results.pkl",
"10_11Banana/inference_model_final_wo_optim-83947126/ycbv_011_banana_train_real_uw/results.pkl",
"11_19PitcherBase/inference_model_final_wo_optim-af1c7e62/ycbv_019_pitcher_base_train_real_uw/results.pkl",
"12_21BleachCleanser/inference_model_final_wo_optim-5d740a46/ycbv_021_bleach_cleanser_train_real_uw/results.pkl",
"13_24Bowl/inference_model_final_wo_optim-f11815d3/ycbv_024_bowl_train_real_uw/results.pkl",
"14_25Mug/inference_model_final_wo_optim-e4824065/ycbv_025_mug_train_real_uw/results.pkl",
"15_35PowerDrill/inference_model_final_wo_optim-30d7d1da/ycbv_035_power_drill_train_real_uw/results.pkl",
"16_36WoodBlock/inference_model_final_wo_optim-fbb38751/ycbv_036_wood_block_train_real_uw/results.pkl",
"17_37Scissors/inference_model_final_wo_optim-5068c6bb/ycbv_037_scissors_train_real_uw/results.pkl",
"18_40LargeMarker/inference_model_final_wo_optim-e8d5867c/ycbv_040_large_marker_train_real_uw/results.pkl",
"19_51LargeClamp/inference_model_final_wo_optim-1ea79b34/ycbv_051_large_clamp_train_real_uw/results.pkl",
"20_52ExtraLargeClamp/inference_model_final_wo_optim-cb595297/ycbv_052_extra_large_clamp_train_real_uw/results.pkl",
"21_61FoamBrick/inference_model_final_wo_optim-d3757ca1/ycbv_061_foam_brick_train_real_uw/results.pkl",
]
obj_names = [obj for obj in obj2id]
new_res_dict = {}
for obj_name, pred_name in zip(obj_names, pkl_paths):
assert obj_name in pred_name, "{} not in {}".format(obj_name, pred_name)
pred_path = osp.join(res_root, pred_name)
assert osp.exists(pred_path), pred_path
iprint(obj_name, pred_path)
# pkl scene_im_id key, list of preds
preds = mmcv.load(pred_path)
for scene_im_id, pred_list in preds.items():
for pred in pred_list:
obj_id = pred["obj_id"]
score = pred["score"]
bbox_est = pred["bbox_det_xyxy"] # xyxy
bbox_est_xywh = xyxy_to_xywh(bbox_est)
refined_pose = pred["pose_{}".format(iter_num_test)]
pose_est = pred["pose_0"]
cur_new_res = {
"obj_id": obj_id,
"score": float(score),
"bbox_est": bbox_est_xywh.tolist(),
"pose_est": pose_est.tolist(),
"pose_refine": refined_pose.tolist(),
}
if scene_im_id not in new_res_dict:
new_res_dict[scene_im_id] = []
new_res_dict[scene_im_id].append(cur_new_res)
inout.save_json(new_res_path, new_res_dict)
iprint()
iprint("new result path: {}".format(new_res_path))
| 52.719298 | 146 | 0.708985 |
139bcb633d3c2b224334dad0ddfc97013f3a8ff8 | 918 | py | Python | tests/test_app/rest_app/rest_app/services/account_service.py | jadbin/guniflask | 36253a962c056abf34884263c6919b02b921ad9c | [
"MIT"
] | 12 | 2018-09-06T06:14:59.000Z | 2021-04-18T06:30:44.000Z | tests/test_app/rest_app/rest_app/services/account_service.py | jadbin/guniflask | 36253a962c056abf34884263c6919b02b921ad9c | [
"MIT"
] | null | null | null | tests/test_app/rest_app/rest_app/services/account_service.py | jadbin/guniflask | 36253a962c056abf34884263c6919b02b921ad9c | [
"MIT"
] | 2 | 2019-09-08T22:01:26.000Z | 2020-08-03T07:23:29.000Z | from flask import abort
from guniflask.context import service
from ..config.jwt_config import jwt_manager
| 27.818182 | 102 | 0.59695 |
139ca9c9fd0f3cc444f7df8ee3874d848532508e | 616 | py | Python | test/library/draft/DataFrames/psahabu/AddSeries.py | jhh67/chapel | f041470e9b88b5fc4914c75aa5a37efcb46aa08f | [
"ECL-2.0",
"Apache-2.0"
] | 1,602 | 2015-01-06T11:26:31.000Z | 2022-03-30T06:17:21.000Z | test/library/draft/DataFrames/psahabu/AddSeries.py | jhh67/chapel | f041470e9b88b5fc4914c75aa5a37efcb46aa08f | [
"ECL-2.0",
"Apache-2.0"
] | 11,789 | 2015-01-05T04:50:15.000Z | 2022-03-31T23:39:19.000Z | test/library/draft/DataFrames/psahabu/AddSeries.py | jhh67/chapel | f041470e9b88b5fc4914c75aa5a37efcb46aa08f | [
"ECL-2.0",
"Apache-2.0"
] | 498 | 2015-01-08T18:58:18.000Z | 2022-03-20T15:37:45.000Z | import pandas as pd
I = ["A", "B", "C", "D", "E"]
oneDigit = pd.Series([1, 2, 3, 4, 5], pd.Index(I))
twoDigit = pd.Series([10, 20, 30, 40, 50], pd.Index(I))
print "addends:"
print oneDigit
print twoDigit
print
print "sum:"
print oneDigit + twoDigit
print
I2 = ["A", "B", "C"]
I3 = ["B", "C", "D", "E"]
X = pd.Series([0, 1, 2], pd.Index(I2))
Y = pd.Series([10, 20, 0, 0], pd.Index(I3))
print "addends:"
print X
print Y
print
print "sum:"
print X + Y
print
A = pd.Series(["hello ", "my ", "name", "is", "brad"])
B = pd.Series(["world", "real"])
print "addends:"
print A
print B
print
print "sum: "
print A + B
| 16.210526 | 55 | 0.576299 |
139ccafc558ec94667dba3f86f2f3f760f5cf3e5 | 11,176 | py | Python | nelly/parser.py | shawcx/nelly | 8075b92e20064a117f9ab5a6d8ad261d21234111 | [
"MIT"
] | null | null | null | nelly/parser.py | shawcx/nelly | 8075b92e20064a117f9ab5a6d8ad261d21234111 | [
"MIT"
] | null | null | null | nelly/parser.py | shawcx/nelly | 8075b92e20064a117f9ab5a6d8ad261d21234111 | [
"MIT"
] | null | null | null | #
# (c) 2008-2020 Matthew Shaw
#
import sys
import os
import re
import logging
import nelly
from .scanner import Scanner
from .program import Program
from .types import *
| 34.708075 | 99 | 0.54796 |
139d2344b35cd1e7a61819201ca64cbfee2afef8 | 2,363 | py | Python | qcodes/utils/installation_info.py | zhinst/Qcodes | d95798bd08d57bb8cddd460fdb4a5ff25f19215c | [
"MIT"
] | 1 | 2020-10-19T08:09:04.000Z | 2020-10-19T08:09:04.000Z | qcodes/utils/installation_info.py | M1racleShih/Qcodes | c03029a6968e16379155aadc8b083a02e01876a6 | [
"MIT"
] | 230 | 2020-08-17T06:08:33.000Z | 2022-03-29T12:06:58.000Z | qcodes/utils/installation_info.py | nikhartman/Qcodes | 042c5e25ab9e40b20c316b4055c4842844834d1e | [
"MIT"
] | 4 | 2017-12-11T12:13:41.000Z | 2018-08-01T13:13:04.000Z | """
This module contains helper functions that provide information about how
QCoDeS is installed and about what other packages are installed along with
QCoDeS
"""
import sys
from typing import Dict, List, Optional
import subprocess
import json
import logging
import requirements
if sys.version_info >= (3, 8):
from importlib.metadata import distribution, version, PackageNotFoundError
else:
# 3.7 and earlier
from importlib_metadata import distribution, version, PackageNotFoundError
import qcodes
log = logging.getLogger(__name__)
def is_qcodes_installed_editably() -> Optional[bool]:
"""
Try to ask pip whether QCoDeS is installed in editable mode and return
the answer a boolean. Returns None if pip somehow did not respond as
expected.
"""
answer: Optional[bool]
try:
pipproc = subprocess.run(['python', '-m', 'pip', 'list', '-e', '--no-index',
'--format=json'],
check=True,
stdout=subprocess.PIPE)
e_pkgs = json.loads(pipproc.stdout.decode('utf-8'))
answer = any([d["name"] == 'qcodes' for d in e_pkgs])
except Exception as e: # we actually do want a catch-all here
log.warning(f'{type(e)}: {str(e)}')
answer = None
return answer
def get_qcodes_version() -> str:
"""
Get the version of the currently installed QCoDeS
"""
return qcodes.version.__version__
def get_qcodes_requirements() -> List[str]:
"""
Return a list of the names of the packages that QCoDeS requires
"""
qc_pkg = distribution('qcodes').requires
if qc_pkg is None:
return []
package_names = [list(requirements.parse(req))[0].name for req in qc_pkg]
return package_names
def get_qcodes_requirements_versions() -> Dict[str, str]:
"""
Return a dictionary of the currently installed versions of the packages
that QCoDeS requires. The dict maps package name to version string.
If an (optional) dependency is not installed the name maps to "Not installed".
"""
req_names = get_qcodes_requirements()
req_versions = {}
for req in req_names:
try:
req_versions[req] = version(req)
except PackageNotFoundError:
req_versions[req] = "Not installed"
return req_versions
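# Quick interactive sketch of the helpers in this module (outputs are
# illustrative, not pinned to any release):
#   get_qcodes_version()                -> "0.x.y"
#   is_qcodes_installed_editably()      -> True, False, or None
#   get_qcodes_requirements_versions()  -> {"numpy": "1.21.0", ...}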
| 28.130952 | 84 | 0.656792 |
139d2693f9221f951071ee2118de0f027b954129 | 1,177 | py | Python | documents/views.py | brandonrobertz/foia-pdf-processing-system | 025516b5e2234df16741237c4208cd484f577370 | [
"MIT"
] | null | null | null | documents/views.py | brandonrobertz/foia-pdf-processing-system | 025516b5e2234df16741237c4208cd484f577370 | [
"MIT"
] | null | null | null | documents/views.py | brandonrobertz/foia-pdf-processing-system | 025516b5e2234df16741237c4208cd484f577370 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.http import JsonResponse
from .models import FieldCategory
| 28.02381 | 58 | 0.614274 |
139d4a4bd97d70f26bdab675ca59d3c9590754fc | 746 | py | Python | tests/test_provider_Mongey_kafka_connect.py | mjuenema/python-terrascript | 6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d | [
"BSD-2-Clause"
] | 507 | 2017-07-26T02:58:38.000Z | 2022-01-21T12:35:13.000Z | tests/test_provider_Mongey_kafka_connect.py | mjuenema/python-terrascript | 6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d | [
"BSD-2-Clause"
] | 135 | 2017-07-20T12:01:59.000Z | 2021-10-04T22:25:40.000Z | tests/test_provider_Mongey_kafka_connect.py | mjuenema/python-terrascript | 6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d | [
"BSD-2-Clause"
] | 81 | 2018-02-20T17:55:28.000Z | 2022-01-31T07:08:40.000Z | # tests/test_provider_Mongey_kafka-connect.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:20:11 UTC)
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.Mongey.kafka_connect
#
# t = terrascript.provider.Mongey.kafka_connect.kafka_connect()
# s = str(t)
#
# assert 'https://github.com/Mongey/terraform-provider-kafka-connect' in s
# assert '0.2.3' in s
| 29.84 | 81 | 0.758713 |
139ed09f5c5b42cbc50f76d8cc6ce28401b30b04 | 12,850 | py | Python | application.py | nicholsont/catalog_app | 011e4c35401aa1128a4cf1ca99dd808da7a759e6 | [
"Unlicense"
] | null | null | null | application.py | nicholsont/catalog_app | 011e4c35401aa1128a4cf1ca99dd808da7a759e6 | [
"Unlicense"
] | null | null | null | application.py | nicholsont/catalog_app | 011e4c35401aa1128a4cf1ca99dd808da7a759e6 | [
"Unlicense"
] | null | null | null | from flask import Flask, render_template, request, redirect, jsonify, g
from flask import url_for, flash, make_response
from flask import session as login_session
from sqlalchemy import create_engine, asc
from sqlalchemy.orm import sessionmaker
from models import Base, Category, Item, User
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
import httplib2
import json
import requests
app = Flask(__name__)
# Retrieves client ID's and secrets from the json files
CLIENT_ID = json.loads(open('client_secrets.json', 'r')
.read())['web']['client_id']
APP_ID = json.loads(open('fb_client_secrets.json', 'r')
.read())['web']['app_id']
APP_SECRET = json.loads(open('fb_client_secrets.json', 'r')
.read())['web']['app_secret']
# Connect to Database and create database session
engine = create_engine('sqlite:///catalog.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# Login handler
# Third Party Oauth callback
def createUser(login_session):
newUser = User(username=login_session['username'],
email=login_session['email'],
picture=login_session['picture'])
session.add(newUser)
session.commit()
def getUserID(email):
try:
user = session.query(User).filter_by(email=email).one()
return user.id
except:
return None
# Revoke current user's token and reset login_session
# JSON APIs to view Category Information.
# Show all Categories and the latest items
# Show Items in a category item
# Show an item in a category
# Create a new item
# Edit a category item
# Delete a category item
if __name__ == '__main__':
app.secret_key = 'N10kuN!'
app.debug = True
app.run(host='0.0.0.0', port=5000)
| 39.296636 | 79 | 0.63323 |
139ed5391c8324e35fd54e409887ff876db4d1d0 | 239 | py | Python | noo/impl/utils/__init__.py | nooproject/noo | 238711c55faeb1226a4e5339cd587a312c4babac | [
"MIT"
] | 2 | 2022-02-03T07:35:46.000Z | 2022-02-03T16:12:25.000Z | noo/impl/utils/__init__.py | nooproject/noo | 238711c55faeb1226a4e5339cd587a312c4babac | [
"MIT"
] | 2 | 2022-03-05T02:31:38.000Z | 2022-03-05T21:26:42.000Z | noo/impl/utils/__init__.py | nooproject/noo | 238711c55faeb1226a4e5339cd587a312c4babac | [
"MIT"
] | 1 | 2022-03-05T01:40:29.000Z | 2022-03-05T01:40:29.000Z | from .echo import echo, set_quiet
from .errors import NooException, cancel
from .store import STORE, FileStore, Store
__all__ = (
"FileStore",
"NooException",
"Store",
"STORE",
"cancel",
"echo",
"set_quiet",
)
| 17.071429 | 42 | 0.635983 |
139f6d8d256ac39b6d5d2e96db49c8e71d3fc905 | 20,472 | py | Python | ai2thor/server.py | aliang8/ai2thor | 3ef92cf5437e2d60127c77bd59d5b7394eebb36c | [
"Apache-2.0"
] | 1 | 2019-04-11T14:51:04.000Z | 2019-04-11T14:51:04.000Z | ai2thor/server.py | aliang8/ai2thor | 3ef92cf5437e2d60127c77bd59d5b7394eebb36c | [
"Apache-2.0"
] | null | null | null | ai2thor/server.py | aliang8/ai2thor | 3ef92cf5437e2d60127c77bd59d5b7394eebb36c | [
"Apache-2.0"
] | null | null | null | # Copyright Allen Institute for Artificial Intelligence 2017
"""
ai2thor.server
Handles all communication with Unity through a Flask service. Messages
are sent to the controller using a pair of request/response queues.
"""
import json
import logging
import sys
import os
import os.path
try:
from queue import Empty
except ImportError:
from Queue import Empty
import time
import warnings
from flask import Flask, request, make_response, abort
import werkzeug
import werkzeug.serving
import werkzeug.http
import numpy as np
from enum import Enum
from ai2thor.util.depth import apply_real_noise, generate_noise_indices
logging.getLogger('werkzeug').setLevel(logging.ERROR)
werkzeug.serving.WSGIRequestHandler.protocol_version = 'HTTP/1.1'
MAX_DEPTH = 5000
# get with timeout to allow quit
def read_buffer_image(buf, width, height, flip_y=True, flip_x=False, dtype=np.uint8,
flip_rb_colors=False):
im_bytes = np.frombuffer(buf.tobytes(), dtype=dtype) if sys.version_info.major < 3 \
else np.frombuffer(buf, dtype=dtype)
im = im_bytes.reshape(height, width, -1)
if flip_y:
im = np.flip(im, axis=0)
if flip_x:
im = np.flip(im, axis=1)
if flip_rb_colors:
im = im[..., ::-1]
return im
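# Usage sketch (buffer and size are hypothetical): given the raw bytes of a
# 300x300 RGB frame received from Unity,
#   im = read_buffer_image(raw_bytes, 300, 300)
# yields a (300, 300, 3) uint8 array, flipped vertically (flip_y defaults to True).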
def unique_rows(arr, return_index=False, return_inverse=False):
arr = np.ascontiguousarray(arr).copy()
b = arr.view(np.dtype((np.void, arr.dtype.itemsize * arr.shape[1])))
if return_inverse:
_, idx, inv = np.unique(b, return_index=True, return_inverse=True)
else:
_, idx = np.unique(b, return_index=True)
unique = arr[idx]
if return_index and return_inverse:
return unique, idx, inv
elif return_index:
return unique, idx
elif return_inverse:
return unique, inv
else:
return unique
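# Usage sketch (array values are illustrative):
#   a = np.array([[1, 2], [1, 2], [3, 4]])
#   unique_rows(a)                       -> the two distinct rows
#   unique_rows(a, return_inverse=True)  -> (distinct rows, index of each input row)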
class MultipartFormParser(object):
class DepthFormat(Enum):
Meters = 0,
Normalized = 1,
Millimeters = 2
class Server(object):
| 37.289617 | 148 | 0.621483 |
139f98ba0220830de5e89cabcde17bead64e5fb5 | 625 | py | Python | setup.py | ooreilly/mydocstring | 077cebfb86575914d343bd3291b9e6c5e8beef94 | [
"MIT"
] | 13 | 2018-12-11T00:34:09.000Z | 2022-03-22T20:41:04.000Z | setup.py | ooreilly/mydocstring | 077cebfb86575914d343bd3291b9e6c5e8beef94 | [
"MIT"
] | 13 | 2018-06-15T19:42:06.000Z | 2020-12-18T22:20:02.000Z | setup.py | ooreilly/mydocstring | 077cebfb86575914d343bd3291b9e6c5e8beef94 | [
"MIT"
] | 5 | 2018-06-16T07:45:49.000Z | 2020-12-12T07:12:00.000Z | from setuptools import setup
setup(name='mydocstring',
version='0.2.7',
description="""A tool for extracting and converting Google-style docstrings to
plain-text, markdown, and JSON.""",
url='http://github.com/ooreilly/mydocstring',
author="Ossian O'Reilly",
license='MIT',
packages=['mydocstring'],
install_requires=['mako', 'docopt'],
entry_points = {
'console_scripts': [
'mydocstring=mydocstring.docstring:main',
],},
package_data={'mydocstring': ['templates/google_docstring.md']},
zip_safe=False)
| 32.894737 | 84 | 0.6048 |
13a03223c85f270b2a1843680f883661d539e4c0 | 903 | py | Python | anyser/impls/bson.py | Cologler/anyser-python | 52afa0a62003adcfe269f47d81863e00381d8ff9 | [
"MIT"
] | null | null | null | anyser/impls/bson.py | Cologler/anyser-python | 52afa0a62003adcfe269f47d81863e00381d8ff9 | [
"MIT"
] | null | null | null | anyser/impls/bson.py | Cologler/anyser-python | 52afa0a62003adcfe269f47d81863e00381d8ff9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2020~2999 - Cologler <[email protected]>
# ----------
#
# ----------
import bson
import struct
from ..err import SerializeError
from ..abc import *
from ..core import register_format
| 25.083333 | 57 | 0.605759 |
13a130fcf753fdc5859f92859bfe939d85633259 | 69,875 | py | Python | tests/test_config_parser.py | KevinMFong/pyhocon | 091830001f2d44f91f0f8281fb119c87fd1f6660 | [
"Apache-2.0"
] | 424 | 2015-01-03T02:48:46.000Z | 2022-03-22T02:47:43.000Z | tests/test_config_parser.py | KevinMFong/pyhocon | 091830001f2d44f91f0f8281fb119c87fd1f6660 | [
"Apache-2.0"
] | 251 | 2015-02-03T20:47:53.000Z | 2022-03-19T16:45:15.000Z | tests/test_config_parser.py | KevinMFong/pyhocon | 091830001f2d44f91f0f8281fb119c87fd1f6660 | [
"Apache-2.0"
] | 127 | 2015-01-09T14:31:49.000Z | 2022-03-19T15:47:30.000Z | # -*- encoding: utf-8 -*-
import json
import os
import shutil
import tempfile
from collections import OrderedDict
from datetime import timedelta
from pyparsing import ParseBaseException, ParseException, ParseSyntaxException
import mock
import pytest
from pyhocon import (ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree)
from pyhocon.exceptions import (ConfigException, ConfigMissingException,
ConfigWrongTypeException)
try:
from dateutil.relativedelta import relativedelta as period
except Exception:
    from datetime import timedelta as period
class TestConfigParser(object):
    """Tests for the HOCON config parser."""
def test_parse_string_with_duration_with_long_unit_name(self):
config = ConfigFactory.parse_string(
"""
a: foo
b: 10 weeks
c: bar
"""
)
assert config['b'] == period(weeks=10)
def test_parse_with_list_mixed_types_with_durations_and_trailing_comma(self):
config = ConfigFactory.parse_string(
"""
a: foo
b: [a, 1, 10 weeks, 5 minutes,]
c: bar
"""
)
assert config['b'] == ['a', 1, period(weeks=10), period(minutes=5)]
def test_parse_with_enclosing_square_bracket(self):
config = ConfigFactory.parse_string("[1, 2, 3]")
assert config == [1, 2, 3]
def test_quoted_key_with_dots(self):
config = ConfigFactory.parse_string(
"""
"a.b.c.d": 3
t {
"d": {
"c": 5
}
}
k {
"b.f.d": 7
}
"""
)
assert config['"a.b.c.d"'] == 3
assert config['t.d.c'] == 5
assert config['k."b.f.d"'] == 7
def test_dotted_notation_merge(self):
config = ConfigFactory.parse_string(
"""
a {
b = foo
c = bar
}
a.c = ${a.b}" "${a.b}
a.d = baz
"""
)
assert config['a.b'] == "foo"
assert config['a.c'] == "foo foo"
assert config['a.d'] == "baz"
def test_comma_to_separate_expr(self):
config = ConfigFactory.parse_string(
"""
a=1,
b="abc",
c=the man,
d=woof,
a-b-c-d=test,
a b c d=test2,
"a b c d e"=test3
"""
)
assert config.get('a') == 1
assert config.get('b') == 'abc'
assert config.get('c') == 'the man'
assert config.get('d') == 'woof'
assert config.get('a-b-c-d') == 'test'
assert config.get('a b c d') == 'test2'
assert config.get('a b c d e') == 'test3'
def test_dict_merge(self):
config = ConfigFactory.parse_string(
"""
a {
d {
g.h.j.u: 5
g {
h.d: 4
}
g.h.k: f d
}
h.i.m = 7
h.i {
d: 5
}
h.i {
e:65
}
}
""")
expected_result = {
"a": {
"d": {
"g": {
"h": {
"j": {
"u": 5
},
"d": 4,
"k": "f d"
}
}
},
"h": {
"i": {
"m": 7,
"d": 5,
"e": 65
}
}
}
}
assert expected_result == config
def test_parse_with_comments(self):
config = ConfigFactory.parse_string(
"""
// comment 1
# comment 2
{
c = test // comment 0
g = 6 test # comment 0
# comment 3
a: { # comment 4
b: test, # comment 5
} # comment 6
t = [1, # comment 7
2, # comment 8
3, # comment 9
]
} # comment 10
// comment 11
// comment 12
"""
)
assert config.get('c') == 'test'
assert config.get('g') == '6 test'
assert config.get('a.b') == 'test'
assert config.get_string('a.b') == 'test'
assert config.get('t') == [1, 2, 3]
def test_missing_config(self):
config = ConfigFactory.parse_string(
"""
a = 5
"""
)
# b is not set so show raise an exception
with pytest.raises(ConfigMissingException):
config.get('b')
def test_parse_null(self):
config = ConfigFactory.parse_string(
"""
a = null
b = [null]
"""
)
assert config.get('a') is None
assert config.get('b')[0] is None
def test_parse_override(self):
config = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = 5
}
}
a.b {
c = 7
d = 8
}
}
"""
)
assert config.get('a.b.c') == 7
assert config.get('a.b.d') == 8
def test_concat_dict(self):
config = ConfigFactory.parse_string(
"""
a: {b: 1}
a: {c: 2}
b: {c: 3} {d: 4} {
c: 5
}
"""
)
assert config.get('a.b') == 1
assert config.get('a.c') == 2
assert config.get('b.c') == 5
assert config.get('b.d') == 4
def test_concat_string(self):
config = ConfigFactory.parse_string(
"""
a = a b c
b = 5 b
c = b 7
"""
)
assert config.get('a') == 'a b c'
assert config.get('b') == '5 b'
assert config.get('c') == 'b 7'
def test_concat_list(self):
config = ConfigFactory.parse_string(
"""
a = [1, 2] [3, 4] [
5,
6
]
"""
)
assert config.get('a') == [1, 2, 3, 4, 5, 6]
assert config.get_list('a') == [1, 2, 3, 4, 5, 6]
def test_bad_concat(self):
ConfigFactory.parse_string('a = 45\n')
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string('a = [4] "4"')
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string('a = "4" [5]')
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string('a = {b: 5} "4"')
def test_string_substitutions(self):
config1 = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = str
e = "str "
}
}
d = ${a.b.c}
f = ${a.b.e}
}
"""
)
assert config1.get('a.b.c') == 'str'
assert config1.get('d') == 'str'
assert config1.get('f') == 'str '
config2 = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = str
e = "str "
}
}
d = test ${a.b.c}
f = test ${a.b.e}
}
"""
)
assert config2.get('a.b.c') == 'str'
assert config2.get('d') == 'test str'
assert config2.get('f') == 'test str '
config3 = ConfigFactory.parse_string(
u"""
{
a: {
b: {
c = str
e = "str "
}
}
d = test ${a.b.c} me
f = test ${a.b.e} me
}
"""
)
assert config3.get('a.b.c') == 'str'
assert config3.get('d') == 'test str me'
assert config3.get('f') == 'test str me'
def test_string_substitutions_with_no_space(self):
config = ConfigFactory.parse_string(
"""
app.heap_size = 128
app.java_opts = [
-Xms${app.heap_size}m
-Xmx${app.heap_size}m
]
"""
)
assert config.get('app.java_opts') == [
'-Xms128m',
'-Xmx128m'
]
def test_int_substitutions(self):
config1 = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = 5
}
}
d = ${a.b.c}
}
"""
)
assert config1.get('a.b.c') == 5
assert config1.get('d') == 5
config2 = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = 5
}
}
d = test ${a.b.c}
}
"""
)
assert config2.get('a.b.c') == 5
assert config2.get('d') == 'test 5'
config3 = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = 5
}
}
d = test ${a.b.c} me
}
"""
)
assert config3.get('a.b.c') == 5
assert config3.get('d') == 'test 5 me'
def test_cascade_string_substitutions(self):
config = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = ${e}
}
}
d = test ${a.b.c} me
e = 7
}
"""
)
assert config.get('a.b.c') == 7
assert config.get('d') == 'test 7 me'
def test_multiple_substitutions(self):
config = ConfigFactory.parse_string(
"""
a = 5
b=${a}${a}
c=${a} ${a}
"""
)
assert config == {
'a': 5,
'b': '55',
'c': '5 5'
}
def test_dict_substitutions(self):
config = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = ${data-center-generic} {name = "east"}
"""
)
assert config.get('data-center-east.cluster-size') == 6
assert config.get('data-center-east.name') == 'east'
config2 = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = {name = "east"} ${data-center-generic}
"""
)
assert config2.get('data-center-east.cluster-size') == 6
assert config2.get('data-center-east.name') == 'east'
config3 = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = {name = "east"} ${data-center-generic} { cluster-size = 9, opts = "-Xmx4g" }
"""
)
assert config3.get('data-center-east.cluster-size') == 9
assert config3.get('data-center-east.name') == 'east'
assert config3.get('data-center-east.opts') == '-Xmx4g'
config4 = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = {name = "east"} ${data-center-generic}
data-center-east-prod = ${data-center-east} {tmpDir=/tmp}
"""
)
assert config4.get('data-center-east.cluster-size') == 6
assert config4.get('data-center-east.name') == 'east'
assert config4.get('data-center-east-prod.cluster-size') == 6
assert config4.get('data-center-east-prod.tmpDir') == '/tmp'
config5 = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = ${data-center-generic}
data-center-east = { name = "east" }
"""
)
assert config5['data-center-east'] == {
'name': 'east',
'cluster-size': 6
}
config6 = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = { name = "east" }
data-center-east = ${data-center-generic}
"""
)
assert config6['data-center-east'] == {
'name': 'east',
'cluster-size': 6
}
def test_dos_chars_with_unquoted_string_noeol(self):
config = ConfigFactory.parse_string("foo = bar")
assert config['foo'] == 'bar'
def test_dos_chars_with_quoted_string_noeol(self):
config = ConfigFactory.parse_string('foo = "5"')
assert config['foo'] == '5'
def test_dos_chars_with_triple_quoted_string_noeol(self):
config = ConfigFactory.parse_string('foo = """5"""')
assert config['foo'] == '5'
def test_dos_chars_with_int_noeol(self):
config = ConfigFactory.parse_string("foo = 5")
assert config['foo'] == 5
def test_dos_chars_with_float_noeol(self):
config = ConfigFactory.parse_string("foo = 5.0")
assert config['foo'] == 5.0
def test_list_substitutions(self):
config = ConfigFactory.parse_string(
"""
common_modules = [php, python]
host_modules = ${common_modules} [java]
"""
)
assert config.get('host_modules') == ['php', 'python', 'java']
config2 = ConfigFactory.parse_string(
"""
common_modules = [php, python]
host_modules = [java] ${common_modules}
"""
)
assert config2.get('host_modules') == ['java', 'php', 'python']
config3 = ConfigFactory.parse_string(
"""
common_modules = [php, python]
host_modules = [java] ${common_modules} [perl]
"""
)
assert config3.get('common_modules') == ['php', 'python']
assert config3.get('host_modules') == ['java', 'php', 'python', 'perl']
config4 = ConfigFactory.parse_string(
"""
common_modules = [php, python]
host_modules = [java] ${common_modules} [perl]
full_modules = ${host_modules} [c, go]
"""
)
assert config4.get('common_modules') == ['php', 'python']
assert config4.get('host_modules') == ['java', 'php', 'python', 'perl']
assert config4.get('full_modules') == ['java', 'php', 'python', 'perl', 'c', 'go']
def test_list_element_substitution(self):
config = ConfigFactory.parse_string(
"""
main_language = php
languages = [java, ${main_language}]
"""
)
assert config.get('languages') == ['java', 'php']
def test_substitution_list_with_append(self):
config = ConfigFactory.parse_string(
"""
application.foo = 128mm
application.large-jvm-opts = ["-XX:+UseParNewGC"] [-Xm16g, ${application.foo}]
application.large-jvm-opts2 = [-Xm16g, ${application.foo}] ["-XX:+UseParNewGC"]
""")
assert config["application.large-jvm-opts"] == [
'-XX:+UseParNewGC',
'-Xm16g',
'128mm'
]
assert config["application.large-jvm-opts2"] == [
'-Xm16g',
'128mm',
'-XX:+UseParNewGC',
]
def test_substitution_list_with_append_substitution(self):
config = ConfigFactory.parse_string(
"""
application.foo = 128mm
application.default-jvm-opts = ["-XX:+UseParNewGC"]
application.large-jvm-opts = ${application.default-jvm-opts} [-Xm16g, ${application.foo}]
application.large-jvm-opts2 = [-Xm16g, ${application.foo}] ${application.default-jvm-opts}
""")
assert config["application.large-jvm-opts"] == [
'-XX:+UseParNewGC',
'-Xm16g',
'128mm'
]
assert config["application.large-jvm-opts2"] == [
'-Xm16g',
'128mm',
'-XX:+UseParNewGC'
]
def test_non_existent_substitution(self):
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
common_modules = ${non_existent}
"""
)
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
common_modules = abc ${non_existent}
"""
)
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
common_modules = ${non_existent} abc
"""
)
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
common_modules = abc ${non_existent} def
"""
)
def test_non_compatible_substitution(self):
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = 55 ${common_modules}
"""
)
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = ${common_modules} 55
"""
)
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = aa ${common_modules} bb
"""
)
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = aa ${common_modules}
"""
)
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = ${common_modules} aa
"""
)
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = aa ${common_modules} bb
"""
)
def test_self_ref_substitution_array(self):
config = ConfigFactory.parse_string(
"""
x = [1,2]
x = ${x} [3,4]
x = [-1, 0] ${x} [5, 6]
x = [-3, -2] ${x}
"""
)
assert config.get("x") == [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6]
def test_self_append_array(self):
config = ConfigFactory.parse_string(
"""
x = [1,2]
x += [3,4]
"""
)
assert config.get("x") == [1, 2, 3, 4]
def test_self_append_string(self):
'''
Should be equivalent to
x = abc
x = ${?x} def
'''
config = ConfigFactory.parse_string(
"""
x = abc
x += def
"""
)
assert config.get("x") == "abc def"
def test_self_append_non_existent_string(self):
'''
Should be equivalent to x = ${?x} def
'''
config = ConfigFactory.parse_string(
"""
x += def
"""
)
assert config.get("x") == " def"
def test_self_append_nonexistent_array(self):
config = ConfigFactory.parse_string(
"""
x += [1,2]
"""
)
assert config.get("x") == [1, 2]
def test_self_append_object(self):
config = ConfigFactory.parse_string(
"""
x = {a: 1}
x += {b: 2}
"""
)
assert config.get("x") == {'a': 1, 'b': 2}
def test_self_append_nonexistent_object(self):
config = ConfigFactory.parse_string(
"""
x += {a: 1}
"""
)
assert config.get("x") == {'a': 1}
def test_self_ref_substitution_array_to_dict(self):
config = ConfigFactory.parse_string(
"""
x = [1,2]
x = {x: [3,4]}
x = {y: [5,6]}
x = {z: ${x}}
"""
)
assert config.get("x.x") == [3, 4]
assert config.get("x.y") == [5, 6]
assert config.get("x.z") == {'x': [3, 4], 'y': [5, 6]}
    def test_self_ref_substitution_dict_in_array(self):
config = ConfigFactory.parse_string(
"""
x = {x: [3,4]}
x = [${x}, 2, 3]
"""
)
(one, two, three) = config.get("x")
assert one == {'x': [3, 4]}
assert two == 2
assert three == 3
def test_self_ref_substitution_dict_path(self):
config = ConfigFactory.parse_string(
"""
x = {y: {z: 1}}
x = ${x.y}
"""
)
assert config.get("x.y") == {'z': 1}
assert config.get("x.z") == 1
assert set(config.get("x").keys()) == set(['y', 'z'])
def test_self_ref_substitution_dict_path_hide(self):
config = ConfigFactory.parse_string(
"""
x = {y: {y: 1}}
x = ${x.y}
"""
)
assert config.get("x.y") == 1
assert set(config.get("x").keys()) == set(['y'])
def test_self_ref_substitution_dict_recurse(self):
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
x = ${x}
"""
)
def test_self_ref_substitution_dict_recurse2(self):
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
x = ${x}
x = ${x}
"""
)
def test_self_ref_substitution_dict_merge(self):
'''
Example from HOCON spec
'''
config = ConfigFactory.parse_string(
"""
foo : { a : { c : 1 } }
foo : ${foo.a}
foo : { a : 2 }
"""
)
assert config.get('foo') == {'a': 2, 'c': 1}
assert set(config.keys()) == set(['foo'])
def test_self_ref_substitution_dict_otherfield(self):
'''
Example from HOCON spec
'''
config = ConfigFactory.parse_string(
"""
bar : {
foo : 42,
baz : ${bar.foo}
}
"""
)
assert config.get("bar") == {'foo': 42, 'baz': 42}
assert set(config.keys()) == set(['bar'])
def test_self_ref_substitution_dict_otherfield_merged_in(self):
'''
Example from HOCON spec
'''
config = ConfigFactory.parse_string(
"""
bar : {
foo : 42,
baz : ${bar.foo}
}
bar : { foo : 43 }
"""
)
assert config.get("bar") == {'foo': 43, 'baz': 43}
assert set(config.keys()) == set(['bar'])
def test_self_ref_substitution_dict_otherfield_merged_in_mutual(self):
'''
Example from HOCON spec
'''
config = ConfigFactory.parse_string(
"""
// bar.a should end up as 4
bar : { a : ${foo.d}, b : 1 }
bar.b = 3
// foo.c should end up as 3
foo : { c : ${bar.b}, d : 2 }
foo.d = 4
"""
)
assert config.get("bar") == {'a': 4, 'b': 3}
assert config.get("foo") == {'c': 3, 'd': 4}
assert set(config.keys()) == set(['bar', 'foo'])
def test_self_ref_substitution_string_opt_concat(self):
'''
Example from HOCON spec
'''
config = ConfigFactory.parse_string(
"""
a = ${?a}foo
"""
)
assert config.get("a") == 'foo'
assert set(config.keys()) == set(['a'])
def test_self_ref_substitution_dict_recurse_part(self):
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
x = ${x} {y: 1}
x = ${x.y}
"""
)
def test_self_ref_substitution_object(self):
config = ConfigFactory.parse_string(
"""
x = {a: 1, b: 2}
x = ${x} {c: 3}
x = {z: 0} ${x}
x = {y: -1} ${x} {d: 4}
"""
)
assert config.get("x") == {'a': 1, 'b': 2, 'c': 3, 'z': 0, 'y': -1, 'd': 4}
def test_self_ref_child(self):
config = ConfigFactory.parse_string(
"""
a.b = 3
a.b = ${a.b}
a.b = ${a.b}
a.c = [1,2]
a.c = ${a.c}
a.d = {foo: bar}
a.d = ${a.d}
"""
)
assert config.get("a") == {'b': 3, 'c': [1, 2], 'd': {'foo': 'bar'}}
def test_concat_multi_line_string(self):
config = ConfigFactory.parse_string(
"""
common_modules = perl \
java \
python
"""
)
assert [x.strip() for x in config['common_modules'].split() if x.strip(' ') != ''] == ['perl', 'java', 'python']
def test_concat_multi_line_list(self):
config = ConfigFactory.parse_string(
"""
common_modules = [perl] \
[java] \
[python]
"""
)
assert config['common_modules'] == ['perl', 'java', 'python']
def test_concat_multi_line_dict(self):
config = ConfigFactory.parse_string(
"""
common_modules = {a:perl} \
{b:java} \
{c:python}
"""
)
assert config['common_modules'] == {'a': 'perl', 'b': 'java', 'c': 'python'}
def test_parse_URL_from_samples(self):
config = ConfigFactory.parse_URL("file:samples/aws.conf")
assert config.get('data-center-generic.cluster-size') == 6
assert config.get('large-jvm-opts') == ['-XX:+UseParNewGC', '-Xm16g']
def test_parse_URL_from_invalid(self):
config = ConfigFactory.parse_URL("https://nosuchurl")
assert config == []
def test_include_dict_from_samples(self):
config = ConfigFactory.parse_file("samples/animals.conf")
assert config.get('cat.garfield.say') == 'meow'
assert config.get('dog.mutt.hates.garfield.say') == 'meow'
def test_include_glob_dict_from_samples(self):
config = ConfigFactory.parse_file("samples/all_animals.conf")
assert config.get('animals.garfield.say') == 'meow'
assert config.get('animals.mutt.hates.garfield.say') == 'meow'
def test_include_glob_list_from_samples(self):
config = ConfigFactory.parse_file("samples/all_bars.conf")
bars = config.get_list('bars')
assert len(bars) == 10
names = {bar['name'] for bar in bars}
types = {bar['type'] for bar in bars if 'type' in bar}
print(types, '(((((')
assert 'Bloody Mary' in names
assert 'Homer\'s favorite coffee' in names
assert 'milk' in types
def test_list_of_dicts(self):
config = ConfigFactory.parse_string(
"""
a: [
{a: 1, b: 2},
{a: 3, c: 4},
]
"""
)
assert config['a'] == [
{'a': 1, 'b': 2},
{'a': 3, 'c': 4}
]
def test_list_of_lists(self):
config = ConfigFactory.parse_string(
"""
a: [
[1, 2]
[3, 4]
]
"""
)
assert config['a'] == [
[1, 2],
[3, 4]
]
def test_list_of_dicts_with_merge(self):
config = ConfigFactory.parse_string(
"""
b = {f: 4}
a: [
${b} {a: 1, b: 2},
{a: 3, c: 4} ${b},
{a: 3} ${b} {c: 6},
]
"""
)
assert config['a'] == [
{'a': 1, 'b': 2, 'f': 4},
{'a': 3, 'c': 4, 'f': 4},
{'a': 3, 'c': 6, 'f': 4}
]
def test_list_of_lists_with_merge(self):
config = ConfigFactory.parse_string(
"""
b = [5, 6]
a: [
${b} [1, 2]
[3, 4] ${b}
[1, 2] ${b} [7, 8]
]
"""
)
assert config['a'] == [
[5, 6, 1, 2],
[3, 4, 5, 6],
[1, 2, 5, 6, 7, 8]
]
def test_invalid_assignment(self):
with pytest.raises(ParseSyntaxException):
ConfigFactory.parse_string('common_modules [perl]')
with pytest.raises(ParseException):
ConfigFactory.parse_string('common_modules {} {perl: 1}')
with pytest.raises(ParseSyntaxException):
ConfigFactory.parse_string(
"""
a = {f: 5}
common_modules ${a} {perl: 1}
""")
def test_invalid_dict(self):
with pytest.raises(ParseSyntaxException):
ConfigFactory.parse_string(
"""
a = {
f: 5
g
}
""")
with pytest.raises(ParseSyntaxException):
ConfigFactory.parse_string('a = {g}')
def test_include_file(self):
with tempfile.NamedTemporaryFile('w') as fdin:
fdin.write('[1, 2]')
fdin.flush()
config1 = ConfigFactory.parse_string(
"""
a: [
include "{tmp_file}"
]
""".format(tmp_file=fdin.name)
)
assert config1['a'] == [1, 2]
config2 = ConfigFactory.parse_string(
"""
a: [
include file("{tmp_file}")
]
""".format(tmp_file=fdin.name)
)
assert config2['a'] == [1, 2]
config3 = ConfigFactory.parse_string(
"""
a: [
include url("file://{tmp_file}")
]
""".format(tmp_file=fdin.name)
)
assert config3['a'] == [1, 2]
def test_include_missing_file(self):
config1 = ConfigFactory.parse_string(
"""
a: [
include "dummy.txt"
3
4
]
"""
)
assert config1['a'] == [3, 4]
def test_include_required_file(self):
config = ConfigFactory.parse_string(
"""
a {
include required("samples/animals.d/cat.conf")
t = 2
}
"""
)
expected = {
'a': {
'garfield': {
'say': 'meow'
},
't': 2
}
}
assert expected == config
config2 = ConfigFactory.parse_string(
"""
a {
include required(file("samples/animals.d/cat.conf"))
t = 2
}
"""
)
assert expected == config2
def test_include_missing_required_file(self):
with pytest.raises(IOError):
ConfigFactory.parse_string(
"""
a: [
include required("dummy.txt")
3
4
]
"""
)
def test_resolve_package_path(self):
path = ConfigParser.resolve_package_path("pyhocon:config_parser.py")
assert os.path.exists(path)
def test_resolve_package_path_format(self):
with pytest.raises(ValueError):
ConfigParser.resolve_package_path("pyhocon/config_parser.py")
def test_resolve_package_path_missing(self):
with pytest.raises(ImportError):
ConfigParser.resolve_package_path("non_existent_module:foo.py")
def test_include_package_file(self, monkeypatch):
temp_dir = tempfile.mkdtemp()
try:
module_dir = os.path.join(temp_dir, 'my_module')
module_conf = os.path.join(module_dir, 'my.conf')
# create the module folder and necessary files (__init__ and config)
os.mkdir(module_dir)
open(os.path.join(module_dir, '__init__.py'), 'a').close()
with open(module_conf, 'w') as fdin:
fdin.write("{c: 3}")
# add the temp dir to sys.path so that 'my_module' can be discovered
monkeypatch.syspath_prepend(temp_dir)
# load the config and include the other config file from 'my_module'
config = ConfigFactory.parse_string(
"""
a: 1
b: 2
include package("my_module:my.conf")
"""
)
# check that the contents of both config files are available
assert dict(config.as_plain_ordered_dict()) == {'a': 1, 'b': 2, 'c': 3}
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
def test_include_dict(self):
expected_res = {
'a': 1,
'b': 2,
'c': 3,
'd': 4
}
with tempfile.NamedTemporaryFile('w') as fdin:
fdin.write('{a: 1, b: 2}')
fdin.flush()
config1 = ConfigFactory.parse_string(
"""
a: {{
include "{tmp_file}"
c: 3
d: 4
}}
""".format(tmp_file=fdin.name)
)
assert config1['a'] == expected_res
config2 = ConfigFactory.parse_string(
"""
a: {{
c: 3
d: 4
include "{tmp_file}"
}}
""".format(tmp_file=fdin.name)
)
assert config2['a'] == expected_res
config3 = ConfigFactory.parse_string(
"""
a: {{
c: 3
include "{tmp_file}"
d: 4
}}
""".format(tmp_file=fdin.name)
)
assert config3['a'] == expected_res
def test_include_substitution(self):
with tempfile.NamedTemporaryFile('w') as fdin:
fdin.write('y = ${x}')
fdin.flush()
config = ConfigFactory.parse_string(
"""
include "{tmp_file}"
x = 42
""".format(tmp_file=fdin.name)
)
assert config['x'] == 42
assert config['y'] == 42
def test_sci_real(self):
"""
Test scientific expression of number
"""
config = ConfigFactory.parse_string(
"""
short = 12.12321
long1 = 121.22E3423432
neg_long1 = 121.22E-1
long2 = 121.22e3423432
neg_long2 = 121.22e-3
"""
)
        # on python 3 long will be an int but on python 2 long will be a long
assert config['short'] == 12.12321
assert config['long1'] == 121.22E3423432
assert config['neg_long1'] == 121.22E-1
assert config['long2'] == 121.22E3423432
assert config['neg_long2'] == 121.22E-3
def test_unicode_dict_key(self):
input_string = u"""
www.sample.com {
us {
name = "first domain"
}
}
www.example-.com {
us {
name = "second domain"
}
}
"""
config = ConfigFactory.parse_string(input_string)
assert config.get_string(u'www.sample.com.us.name') == 'first domain'
assert config.get_string(u'www.example-.com.us.name') == 'second domain'
with pytest.raises(ConfigWrongTypeException):
config.put(u'www.example-', 'append_failure', append=True)
with pytest.raises(ConfigMissingException):
config.get_string(u'missing_unicode_key_')
with pytest.raises(ConfigException):
config.get_bool(u'www.example-.com.us.name')
with pytest.raises(ConfigException):
config.get_list(u'www.example-.com.us.name')
with pytest.raises(ConfigException):
config.get_config(u'www.example-.com.us.name')
with pytest.raises(ConfigWrongTypeException):
config.get_string(u'www.example-.com.us.name.missing')
def test_with_comment_on_last_line(self):
        # Address issue #102
config_tree = ConfigFactory.parse_string("""
foo: "1"
bar: "2"
# DO NOT CHANGE ANY OF THE ABOVE SETTINGS!""")
assert config_tree == {
'foo': '1',
'bar': '2'
}
def test_triple_quotes_same_line(self):
config_tree = ConfigFactory.parse_string('a:["""foo"""", "bar"]')
assert config_tree == {
'a': ['foo"', "bar"]
}
def test_pop(self):
config_tree = ConfigFactory.parse_string('a:{b: 3, d: 6}')
assert 3 == config_tree.pop('a.b', 5)
assert 5 == config_tree.pop('a.c', 5)
expected = {
'a': {'d': 6}
}
assert expected == config_tree
def test_merge_overriden(self):
        # Address issue #110
# ConfigValues must merge with its .overriden_value
# if both are ConfigTree
config_tree = ConfigFactory.parse_string("""
foo: ${bar}
foo: ${baz}
bar: {r: 1, s: 2}
baz: {s: 3, t: 4}
""")
assert 'r' in config_tree['foo'] and 't' in config_tree['foo'] and config_tree['foo']['s'] == 3
def test_attr_syntax(self):
config = ConfigFactory.parse_string(
"""
a: 1
b: {
pb: 5
}
""")
assert 5 == config.b.pb
def test_escape_quote(self):
config = ConfigFactory.parse_string(
"""
quoted: "abc\\"test"
unquoted: abc\\"test
""")
assert 'abc"test' == config['quoted']
assert 'abc"test' == config['unquoted']
def test_escape_quote_complex(self):
config = ConfigFactory.parse_string(
"""
value: "{\\"critical\\":\\"0.00\\",\\"warning\\":\\"99.99\\"}"
"""
)
assert '{"critical":"0.00","warning":"99.99"}' == config['value']
def test_keys_with_slash(self):
config = ConfigFactory.parse_string(
"""
/abc/cde1: abc
"/abc/cde2": "cde"
/abc/cde3: "fgh"
""")
assert 'abc' == config['/abc/cde1']
assert 'cde' == config['/abc/cde2']
assert 'fgh' == config['/abc/cde3']
def test_mutation_values(self):
config = ConfigFactory.parse_string(
"""
common : {
}
b1 = []
var = "wrong"
compilerCommon : ${common} {
VAR : ${var}
}
substrate-suite: {
VAR : "right"
}
b1 = [
${compilerCommon} ${substrate-suite}
${compilerCommon} ${substrate-suite}
]
b2 = [
${compilerCommon} ${substrate-suite}
${compilerCommon} ${substrate-suite}
]
""")
assert config.get("b1")[1]['VAR'] == 'right'
assert config.get("b2")[1]['VAR'] == 'right'
def test_escape_sequences_json_equivalence(self):
"""
Quoted strings are in the same format as JSON strings,
See: https://github.com/lightbend/config/blob/master/HOCON.md#unchanged-from-json
"""
source = r"""
{
"plain-backslash": "\\",
"tab": "\t",
"no-tab": "\\t",
"newline": "\n",
"no-newline": "\\n",
"cr": "\r",
"no-cr": "\\r",
"windows": "c:\\temp"
}
"""
expected = {
'plain-backslash': '\\',
'tab': '\t',
'no-tab': '\\t',
'newline': '\n',
'no-newline': '\\n',
'cr': '\r',
'no-cr': '\\r',
'windows': 'c:\\temp',
}
config = ConfigFactory.parse_string(source)
assert config == expected
assert config == json.loads(source)
try:
from dateutil.relativedelta import relativedelta
except Exception:
pass
| 28.381397 | 120 | 0.44385 |
13a1474b58c5efbf18c61bee86e2d5e292bdda41 | 7,416 | py | Python | scenario_runner/srunner/scenariomanager/scenario_manager.py | cgeller/WorldOnRails | d8aa9f7ae67a6b7b71a2fc5ba86bb2a44f221bef | [
"MIT"
] | 447 | 2021-03-26T09:29:17.000Z | 2022-03-30T03:03:35.000Z | scenario_runner/srunner/scenariomanager/scenario_manager.py | cgeller/WorldOnRails | d8aa9f7ae67a6b7b71a2fc5ba86bb2a44f221bef | [
"MIT"
] | 56 | 2021-04-21T03:12:50.000Z | 2022-03-30T13:34:16.000Z | scenario_runner/srunner/scenariomanager/scenario_manager.py | cgeller/WorldOnRails | d8aa9f7ae67a6b7b71a2fc5ba86bb2a44f221bef | [
"MIT"
] | 82 | 2021-04-14T04:34:04.000Z | 2022-03-29T07:35:15.000Z | #!/usr/bin/env python
# Copyright (c) 2018-2020 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
This module provides the ScenarioManager implementation.
It must not be modified and is for reference only!
"""
from __future__ import print_function
import sys
import time
import py_trees
from srunner.autoagents.agent_wrapper import AgentWrapper
from srunner.scenariomanager.carla_data_provider import CarlaDataProvider
from srunner.scenariomanager.result_writer import ResultOutputProvider
from srunner.scenariomanager.timer import GameTime
from srunner.scenariomanager.watchdog import Watchdog
| 31.965517 | 95 | 0.631338 |
13a20e94df54130a998b207ca8a8c8a5a8437f0f | 43,104 | py | Python | edb/schema/referencing.py | disfated/edgedb | 8d78f4a2a578f80780be160ba5f107f5bdc79063 | [
"Apache-2.0"
] | null | null | null | edb/schema/referencing.py | disfated/edgedb | 8d78f4a2a578f80780be160ba5f107f5bdc79063 | [
"Apache-2.0"
] | null | null | null | edb/schema/referencing.py | disfated/edgedb | 8d78f4a2a578f80780be160ba5f107f5bdc79063 | [
"Apache-2.0"
] | null | null | null | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import *
import hashlib
from edb import errors
from edb.common import struct
from edb.edgeql import ast as qlast
from . import delta as sd
from . import inheriting
from . import objects as so
from . import schema as s_schema
from . import name as sn
from . import utils
ReferencedT = TypeVar('ReferencedT', bound='ReferencedObject')
ReferencedInheritingObjectT = TypeVar('ReferencedInheritingObjectT',
bound='ReferencedInheritingObject')
def _build_alter_cmd_stack(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
scls: so.Object,
*,
referrer: Optional[so.Object] = None
) -> Tuple[sd.DeltaRoot, sd.Command]:
delta = sd.DeltaRoot()
if referrer is None:
assert isinstance(scls, ReferencedObject)
referrer = scls.get_referrer(schema)
obj = referrer
object_stack = []
if type(self) != type(referrer):
object_stack.append(referrer)
while obj is not None:
if isinstance(obj, ReferencedObject):
obj = obj.get_referrer(schema)
object_stack.append(obj)
else:
obj = None
cmd: sd.Command = delta
for obj in reversed(object_stack):
assert obj is not None
alter_cmd_cls = sd.ObjectCommandMeta.get_command_class_or_die(
sd.AlterObject, type(obj))
alter_cmd = alter_cmd_cls(classname=obj.get_name(schema))
cmd.add(alter_cmd)
cmd = alter_cmd
return delta, cmd
class CreateReferencedObject(
ReferencedObjectCommand[ReferencedT],
sd.CreateObject[ReferencedT],
):
referenced_astnode: ClassVar[Type[qlast.ObjectDDL]]
def _create_innards(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
referrer_ctx = self.get_referrer_context(context)
if referrer_ctx is None:
return super()._create_innards(schema, context)
else:
referrer = referrer_ctx.scls
schema = self._create_ref(schema, context, referrer)
return super()._create_innards(schema, context)
def _create_ref(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
referrer: so.Object,
) -> s_schema.Schema:
referrer_cls = type(referrer)
mcls = type(self.scls)
refdict = referrer_cls.get_refdict_for_class(mcls)
schema = referrer.add_classref(schema, refdict.attr, self.scls)
return schema
class DeleteReferencedObjectCommand(
ReferencedObjectCommand[ReferencedT],
sd.DeleteObject[ReferencedT],
):
class ReferencedInheritingObjectCommand(
ReferencedObjectCommand[ReferencedInheritingObjectT],
inheriting.InheritingObjectCommand[ReferencedInheritingObjectT],
):
class CreateReferencedInheritingObject(
CreateReferencedObject[ReferencedInheritingObjectT],
inheriting.CreateInheritingObject[ReferencedInheritingObjectT],
ReferencedInheritingObjectCommand[ReferencedInheritingObjectT],
):
class AlterReferencedInheritingObject(
ReferencedInheritingObjectCommand[ReferencedInheritingObjectT],
inheriting.AlterInheritingObject[ReferencedInheritingObjectT],
):
class RebaseReferencedInheritingObject(
ReferencedInheritingObjectCommand[ReferencedInheritingObjectT],
inheriting.RebaseInheritingObject[ReferencedInheritingObjectT],
):
implicit = struct.Field(bool, default=False)
class RenameReferencedInheritingObject(
ReferencedInheritingObjectCommand[ReferencedInheritingObjectT],
sd.RenameObject,
):
class DeleteReferencedInheritingObject(
DeleteReferencedObjectCommand[ReferencedInheritingObjectT],
inheriting.DeleteInheritingObject[ReferencedInheritingObjectT],
ReferencedInheritingObjectCommand[ReferencedInheritingObjectT],
):
| 35.015435 | 79 | 0.588994 |
13a2d4c2633ce0ba08875637d40181583a434b5a | 1,740 | py | Python | tools.py | Jakuko99/effectb | ab6688ce3679cdd2cf43038f7bfef67dabf97c1b | [
"MIT"
] | 1 | 2021-05-31T09:21:19.000Z | 2021-05-31T09:21:19.000Z | tools.py | Jakuko99/effectb | ab6688ce3679cdd2cf43038f7bfef67dabf97c1b | [
"MIT"
] | null | null | null | tools.py | Jakuko99/effectb | ab6688ce3679cdd2cf43038f7bfef67dabf97c1b | [
"MIT"
] | null | null | null | from calendar import month_name | 29 | 77 | 0.468391 |
13a338753672931f84d30f4a3787e44f246ba8c1 | 583 | py | Python | Bugscan_exploits-master/exp_list/exp-2307.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 11 | 2020-05-30T13:53:49.000Z | 2021-03-17T03:20:59.000Z | Bugscan_exploits-master/exp_list/exp-2307.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 6 | 2020-05-13T03:25:18.000Z | 2020-07-21T06:24:16.000Z | Bugscan_exploits-master/exp_list/exp-2307.py | csadsl/poc_exp | e3146262e7403f19f49ee2db56338fa3f8e119c9 | [
"MIT"
] | 6 | 2020-05-30T13:53:51.000Z | 2020-12-01T21:44:26.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#__Author__ =
#_PlugName_ = Shop7z /admin/lipinadd.asp
import re
if __name__ == '__main__':
from dummy import *
audit(assign('shop7z', 'http://www.99ysbjw.com/')[1]) | 27.761905 | 80 | 0.603774 |
13a3a70ae5392650e62af677f5914bc6b6d670e2 | 18,444 | py | Python | homeassistant/components/hue/light.py | dlangerm/core | 643acbf9484fd05161d7e9f2228c9c92a5ce7d0b | [
"Apache-2.0"
] | 5 | 2017-01-26T16:33:09.000Z | 2018-07-20T13:50:47.000Z | homeassistant/components/hue/light.py | dlangerm/core | 643acbf9484fd05161d7e9f2228c9c92a5ce7d0b | [
"Apache-2.0"
] | 68 | 2018-10-04T16:01:20.000Z | 2022-03-31T06:21:46.000Z | homeassistant/components/hue/light.py | dlangerm/core | 643acbf9484fd05161d7e9f2228c9c92a5ce7d0b | [
"Apache-2.0"
] | 7 | 2018-10-04T10:12:45.000Z | 2021-12-29T20:55:40.000Z | """Support for the Philips Hue lights."""
from __future__ import annotations
from datetime import timedelta
from functools import partial
import logging
import random
import aiohue
import async_timeout
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_FLASH,
ATTR_HS_COLOR,
ATTR_TRANSITION,
EFFECT_COLORLOOP,
EFFECT_RANDOM,
FLASH_LONG,
FLASH_SHORT,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_EFFECT,
SUPPORT_FLASH,
SUPPORT_TRANSITION,
LightEntity,
)
from homeassistant.core import callback
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.debounce import Debouncer
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from homeassistant.util import color
from .const import (
DOMAIN as HUE_DOMAIN,
GROUP_TYPE_LIGHT_GROUP,
GROUP_TYPE_LIGHT_SOURCE,
GROUP_TYPE_LUMINAIRE,
GROUP_TYPE_ROOM,
REQUEST_REFRESH_DELAY,
)
from .helpers import remove_devices
SCAN_INTERVAL = timedelta(seconds=5)
_LOGGER = logging.getLogger(__name__)
SUPPORT_HUE_ON_OFF = SUPPORT_FLASH | SUPPORT_TRANSITION
SUPPORT_HUE_DIMMABLE = SUPPORT_HUE_ON_OFF | SUPPORT_BRIGHTNESS
SUPPORT_HUE_COLOR_TEMP = SUPPORT_HUE_DIMMABLE | SUPPORT_COLOR_TEMP
SUPPORT_HUE_COLOR = SUPPORT_HUE_DIMMABLE | SUPPORT_EFFECT | SUPPORT_COLOR
SUPPORT_HUE_EXTENDED = SUPPORT_HUE_COLOR_TEMP | SUPPORT_HUE_COLOR
SUPPORT_HUE = {
"Extended color light": SUPPORT_HUE_EXTENDED,
"Color light": SUPPORT_HUE_COLOR,
"Dimmable light": SUPPORT_HUE_DIMMABLE,
"On/Off plug-in unit": SUPPORT_HUE_ON_OFF,
"Color temperature light": SUPPORT_HUE_COLOR_TEMP,
}
ATTR_IS_HUE_GROUP = "is_hue_group"
GAMUT_TYPE_UNAVAILABLE = "None"
# Minimum Hue Bridge API version to support groups
# 1.4.0 introduced extended group info
# 1.12 introduced the state object for groups
# 1.13 introduced "any_on" to group state objects
GROUP_MIN_API_VERSION = (1, 13, 0)
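# Added illustration (not part of the original integration): one way the
# GROUP_MIN_API_VERSION tuple above could be checked against the "apiversion"
# string a bridge reports. The helper name and parsing details are assumptions
# made here for clarity only.
def _bridge_supports_groups(api_version: str) -> bool:
    """Return True if `api_version` (e.g. "1.13.0") meets GROUP_MIN_API_VERSION."""
    try:
        parts = tuple(int(part) for part in api_version.split("."))
    except ValueError:
        return False
    # Pad short version strings ("1.13" -> (1, 13, 0)) before comparing tuples.
    return parts + (0,) * (3 - len(parts)) >= GROUP_MIN_API_VERSION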
def create_light(item_class, coordinator, bridge, is_group, rooms, api, item_id):
"""Create the light."""
api_item = api[item_id]
if is_group:
supported_features = 0
for light_id in api_item.lights:
if light_id not in bridge.api.lights:
continue
light = bridge.api.lights[light_id]
supported_features |= SUPPORT_HUE.get(light.type, SUPPORT_HUE_EXTENDED)
supported_features = supported_features or SUPPORT_HUE_EXTENDED
else:
supported_features = SUPPORT_HUE.get(api_item.type, SUPPORT_HUE_EXTENDED)
return item_class(
coordinator, bridge, is_group, api_item, supported_features, rooms
)
def hue_brightness_to_hass(value):
"""Convert hue brightness 1..254 to hass format 0..255."""
return min(255, round((value / 254) * 255))
def hass_to_hue_brightness(value):
"""Convert hass brightness 0..255 to hue 1..254 scale."""
return max(1, round((value / 255) * 254))
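# Added example (not from the original module): the two helpers above convert
# between Hue's 1..254 brightness scale and Home Assistant's 0..255 scale.
# Both directions round, so a round trip is approximate rather than exact.
if __name__ == "__main__":  # pragma: no cover - illustrative only
    for hass_value in (1, 128, 255):
        hue_value = hass_to_hue_brightness(hass_value)
        print(hass_value, "->", hue_value, "->", hue_brightness_to_hass(hue_value))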
class HueLight(CoordinatorEntity, LightEntity):
"""Representation of a Hue light."""
def __init__(self, coordinator, bridge, is_group, light, supported_features, rooms):
"""Initialize the light."""
super().__init__(coordinator)
self.light = light
self.bridge = bridge
self.is_group = is_group
self._supported_features = supported_features
self._rooms = rooms
if is_group:
self.is_osram = False
self.is_philips = False
self.is_innr = False
self.is_ewelink = False
self.is_livarno = False
self.gamut_typ = GAMUT_TYPE_UNAVAILABLE
self.gamut = None
else:
self.is_osram = light.manufacturername == "OSRAM"
self.is_philips = light.manufacturername == "Philips"
self.is_innr = light.manufacturername == "innr"
self.is_ewelink = light.manufacturername == "eWeLink"
self.is_livarno = light.manufacturername.startswith("_TZ3000_")
self.gamut_typ = self.light.colorgamuttype
self.gamut = self.light.colorgamut
_LOGGER.debug("Color gamut of %s: %s", self.name, str(self.gamut))
if self.light.swupdatestate == "readytoinstall":
err = (
"Please check for software updates of the %s "
"bulb in the Philips Hue App."
)
_LOGGER.warning(err, self.name)
if self.gamut and not color.check_valid_gamut(self.gamut):
err = "Color gamut of %s: %s, not valid, setting gamut to None."
_LOGGER.debug(err, self.name, str(self.gamut))
self.gamut_typ = GAMUT_TYPE_UNAVAILABLE
self.gamut = None
async def async_added_to_hass(self) -> None:
"""Handle entity being added to Home Assistant."""
self.async_on_remove(
self.bridge.listen_updates(
self.light.ITEM_TYPE, self.light.id, self.async_write_ha_state
)
)
await super().async_added_to_hass()
| 32.702128 | 88 | 0.635437 |
13a4eaea9e2402891521cc56201ae27b7976fb0d | 4,794 | py | Python | src/ezdxf/math/bulge.py | dmtvanzanten/ezdxf | 6fe9d0aa961e011c87768aa6511256de21a662dd | [
"MIT"
] | null | null | null | src/ezdxf/math/bulge.py | dmtvanzanten/ezdxf | 6fe9d0aa961e011c87768aa6511256de21a662dd | [
"MIT"
] | null | null | null | src/ezdxf/math/bulge.py | dmtvanzanten/ezdxf | 6fe9d0aa961e011c87768aa6511256de21a662dd | [
"MIT"
] | null | null | null | # Copyright (c) 2018-2021 Manfred Moitzi
# License: MIT License
# source: http://www.lee-mac.com/bulgeconversion.html
# source: http://www.afralisp.net/archive/lisp/Bulges1.htm
from typing import Any, TYPE_CHECKING, Tuple
import math
from ezdxf.math import Vec2
if TYPE_CHECKING:
from ezdxf.eztypes import Vertex
__all__ = [
"bulge_to_arc", "bulge_3_points", "bulge_center", "bulge_radius",
"arc_to_bulge"
]
def polar(p: Any, angle: float, distance: float) -> Vec2:
""" Returns the point at a specified `angle` and `distance` from point `p`.
Args:
p: point as :class:`Vec2` compatible object
angle: angle in radians
distance: distance
"""
return Vec2(p) + Vec2.from_angle(angle, distance)
def angle(p1: Any, p2: Any) -> float:
""" Returns angle a line defined by two endpoints and x-axis in radians.
Args:
p1: start point as :class:`Vec2` compatible object
p2: end point as :class:`Vec2` compatible object
"""
return (Vec2(p2) - Vec2(p1)).angle
def arc_to_bulge(center: 'Vertex', start_angle: float, end_angle: float,
radius: float) -> Tuple['Vec2', 'Vec2', float]:
"""
Returns bulge parameters from arc parameters.
Args:
center: circle center point as :class:`Vec2` compatible object
start_angle: start angle in radians
end_angle: end angle in radians
radius: circle radius
Returns:
tuple: (start_point, end_point, bulge)
"""
start_point = polar(center, start_angle, radius)
end_point = polar(center, end_angle, radius)
pi2 = math.pi * 2
a = math.fmod((pi2 + (end_angle - start_angle)), pi2) / 4.
bulge = math.sin(a) / math.cos(a)
return start_point, end_point, bulge
def bulge_3_points(start_point: 'Vertex', end_point: 'Vertex',
point: 'Vertex') -> float:
""" Returns bulge value defined by three points.
Based on 3-Points to Bulge by `Lee Mac`_.
Args:
start_point: start point as :class:`Vec2` compatible object
end_point: end point as :class:`Vec2` compatible object
point: arbitrary point as :class:`Vec2` compatible object
"""
a = (math.pi - angle(point, start_point) + angle(point, end_point)) / 2
return math.sin(a) / math.cos(a)
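# Note: the functions below call signed_bulge_radius(), whose definition is
# not included in this excerpt. The sketch here is an assumption based on the
# usual chord/bulge relation radius = chord * (1 + bulge**2) / (4 * bulge);
# the sign of the returned radius follows the sign of the bulge value.
def signed_bulge_radius(start_point: 'Vertex', end_point: 'Vertex',
                        bulge: float) -> float:
    """ Returns the signed arc radius for the chord from start_point to end_point. """
    chord_length = (Vec2(end_point) - Vec2(start_point)).magnitude
    return chord_length * (1.0 + bulge * bulge) / 4.0 / bulge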
def bulge_to_arc(start_point: 'Vertex',
end_point: 'Vertex',
bulge: float) -> Tuple['Vec2', float, float, float]:
""" Returns arc parameters from bulge parameters.
The arcs defined by bulge values of :class:`~ezdxf.entities.LWPolyline`
and 2D :class:`~ezdxf.entities.Polyline` entities start at the vertex which
includes the bulge value and ends at the following vertex.
Based on Bulge to Arc by `Lee Mac`_.
Args:
start_point: start vertex as :class:`Vec2` compatible object
end_point: end vertex as :class:`Vec2` compatible object
bulge: bulge value
Returns:
Tuple: (center, start_angle, end_angle, radius)
"""
r = signed_bulge_radius(start_point, end_point, bulge)
a = angle(start_point, end_point) + (math.pi / 2 - math.atan(bulge) * 2)
c = polar(start_point, a, r)
if bulge < 0:
return c, angle(c, end_point), angle(c, start_point), abs(r)
else:
return c, angle(c, start_point), angle(c, end_point), abs(r)
def bulge_center(start_point: 'Vertex', end_point: 'Vertex',
bulge: float) -> 'Vec2':
""" Returns center of arc described by the given bulge parameters.
Based on Bulge Center by `Lee Mac`_.
Args:
start_point: start point as :class:`Vec2` compatible object
end_point: end point as :class:`Vec2` compatible object
bulge: bulge value as float
"""
start_point = Vec2(start_point)
a = angle(start_point, end_point) + (math.pi / 2. - math.atan(bulge) * 2.)
return start_point + Vec2.from_angle(a, signed_bulge_radius(start_point,
end_point,
bulge))
def bulge_radius(start_point: 'Vertex', end_point: 'Vertex',
bulge: float) -> float:
""" Returns radius of arc defined by the given bulge parameters.
Based on Bulge Radius by `Lee Mac`_
Args:
start_point: start point as :class:`Vec2` compatible object
end_point: end point as :class:`Vec2` compatible object
bulge: bulge value
"""
return abs(signed_bulge_radius(start_point, end_point, bulge))
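# Added usage example (values are arbitrary): converting an arc to bulge form
# and back should reproduce the original parameters up to floating point error.
if __name__ == "__main__":  # pragma: no cover - illustrative only
    start, end, bulge = arc_to_bulge((0, 0), 0.0, math.pi / 2, 1.0)
    center, start_angle, end_angle, radius = bulge_to_arc(start, end, bulge)
    print(center, start_angle, end_angle, radius)  # ~ (0, 0), 0, pi/2, 1.0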
| 32.391892 | 79 | 0.629954 |
13a51bb2dfcddb562cef50eb73654bc85d1b9f01 | 666 | py | Python | Plugins/Aspose.Email Java for Python/tests/ProgrammingEmail/ManageAttachments/ManageAttachments.py | aspose-email/Aspose.Email-for-Java | cf4567e54f7979e7296c99bcae2c6477385d7735 | [
"MIT"
] | 24 | 2016-07-29T03:57:35.000Z | 2022-01-18T23:42:08.000Z | Plugins/Aspose.Email Java for Python/tests/ProgrammingEmail/ManageAttachments/ManageAttachments.py | asposeemail/Aspose_Email_Java | cf4567e54f7979e7296c99bcae2c6477385d7735 | [
"MIT"
] | 6 | 2017-07-24T13:08:43.000Z | 2022-01-01T21:51:25.000Z | Plugins/Aspose.Email Java for Python/tests/ProgrammingEmail/ManageAttachments/ManageAttachments.py | aspose-email/Aspose.Email-for-Java | cf4567e54f7979e7296c99bcae2c6477385d7735 | [
"MIT"
] | 25 | 2016-04-09T07:24:12.000Z | 2021-12-19T13:54:21.000Z | # To change this license header, choose License Headers in Project Properties.
# To change this template file, choose Tools | Templates
# and open the template in the editor.
#if __name__ == "__main__":
# print "Hello World"
from ProgrammingEmail import ManageAttachments
import jpype
import os.path
asposeapispath = os.path.join(os.path.abspath("./../../../"), "lib/")
dataDir = os.path.join(os.path.abspath("./"), "data/")
print "You need to put your Aspose.Email for Java APIs .jars in this folder:\n"+asposeapispath
#print dataDir
jpype.startJVM(jpype.getDefaultJVMPath(), "-Djava.ext.dirs=%s" % asposeapispath)
hw = ManageAttachments(dataDir)
hw.main() | 31.714286 | 94 | 0.735736 |
13a54ea2fa4b8b8724c32c2f486041ebcedd4707 | 33,005 | py | Python | mypython/keys.py | asmeurer/mypython | ae984926739cc2bb3abe70566762d7b4052ed0ae | [
"MIT"
] | 27 | 2017-02-09T06:18:30.000Z | 2022-02-16T08:32:42.000Z | mypython/keys.py | asmeurer/mypython | ae984926739cc2bb3abe70566762d7b4052ed0ae | [
"MIT"
] | 1 | 2022-01-20T20:23:41.000Z | 2022-01-20T20:23:41.000Z | mypython/keys.py | asmeurer/mypython | ae984926739cc2bb3abe70566762d7b4052ed0ae | [
"MIT"
] | 2 | 2019-12-14T06:45:04.000Z | 2021-10-04T00:28:48.000Z | from prompt_toolkit.key_binding.bindings.named_commands import (accept_line,
self_insert, backward_delete_char, beginning_of_line)
from prompt_toolkit.key_binding.bindings.basic import if_no_repeat
from prompt_toolkit.key_binding.bindings.basic import load_basic_bindings
from prompt_toolkit.key_binding.bindings.emacs import load_emacs_bindings, load_emacs_search_bindings
from prompt_toolkit.key_binding.bindings.mouse import load_mouse_bindings
from prompt_toolkit.key_binding.bindings.cpr import load_cpr_bindings
from prompt_toolkit.key_binding.bindings.page_navigation import load_emacs_page_navigation_bindings
from prompt_toolkit.key_binding import KeyBindings, merge_key_bindings
from prompt_toolkit.keys import Keys, ALL_KEYS
from prompt_toolkit.filters import Condition, HasSelection, is_searching
from prompt_toolkit.selection import SelectionState
from prompt_toolkit.clipboard import ClipboardData
from prompt_toolkit.input.vt100_parser import ANSI_SEQUENCES
from prompt_toolkit.application.current import get_app
from prompt_toolkit.application import run_in_terminal
from prompt_toolkit import __version__ as prompt_toolkit_version
from .multiline import (auto_newline, tab_should_insert_whitespace,
document_is_multiline_python)
from .tokenize import inside_string, matching_parens
from .theme import emoji, emoji_pudb
from .processors import get_pyflakes_warnings
import re
import subprocess
import sys
import textwrap
import platform
r = custom_key_bindings = KeyBindings()
# This can be removed once
# https://github.com/prompt-toolkit/python-prompt-toolkit/pull/857 is in a
# released version of prompt-toolkit.
ANSI_SEQUENCES['\x1b[1;9A'] = (Keys.Escape, Keys.Up)
ANSI_SEQUENCES['\x1b[1;9B'] = (Keys.Escape, Keys.Down)
# Document.start_of_paragraph/end_of_paragraph don't treat multiple blank
# lines correctly.
# Gives the positions right before one or more blank lines
BLANK_LINES = re.compile(r'\S *(\n *\n)')
WORD = re.compile(r'([a-z0-9]+|[A-Z]{2,}|[a-zA-Z0-9][a-z0-9]*)')
def insert_text_ovewrite(buffer, data, move_cursor=True):
"""
    Insert characters at the cursor position, overwriting any existing
    characters directly after the cursor.
    :param buffer: prompt_toolkit buffer to modify.
    :param data: string to write at the cursor position.
    :param move_cursor: if True, move the cursor to the end of the inserted text.
"""
# Original text & cursor position.
otext = buffer.text
ocpos = buffer.cursor_position
# Don't overwrite the newline itself. Just before the line ending,
# it should act like insert mode.
overwritten_text = otext[ocpos:ocpos + len(data)]
buffer.text = otext[:ocpos] + data + otext[ocpos + len(overwritten_text):]
if move_cursor:
buffer.cursor_position += len(data)
is_returnable = Condition(
lambda: get_app().current_buffer.is_returnable)
# Always accept the line if the previous key was Up
# Requires https://github.com/jonathanslenders/python-prompt-toolkit/pull/492.
# We don't need a parallel for down because down is already at the end of the
# prompt.
# M-[ a g is set to S-Enter in iTerm2 settings
Keys.ShiftEnter = "<Shift-Enter>"
ALL_KEYS.append('<Shift-Enter>')
ANSI_SEQUENCES['\x1b[ag'] = Keys.ShiftEnter
ANSI_SEQUENCES['\x1bOM'] = Keys.ShiftEnter
if prompt_toolkit_version[0] != '3':
r.add_binding(Keys.ShiftEnter)(accept_line)
LEADING_WHITESPACE = re.compile(r'( *)[^ ]?')
def do_cycle_spacing(text, cursor_position, state=[]):
rstripped = text[:cursor_position].rstrip()
lstripped = text[cursor_position:].lstrip()
text_before_cursor = text[:cursor_position]
# The first element of state is the original text. The last element is the
# buffer text and cursor position as we last left them. If either of those
# have changed, reset. The state here is global, but that's fine, because
# we consider any change to be enough clear the state. The worst that
# happens here is that we resume when we shouldn't if things look exactly
# as they did where we left off.
# TODO: Use event.previous_key_sequence instead.
if state and state[-1] != (text, cursor_position):
state.clear()
if len(state) == 0:
# Replace all whitespace at the cursor (if any) with a single space.
state.append((text, cursor_position))
        cursor_position -= len(text_before_cursor) - len(rstripped) - 1
text = rstripped + ' ' + lstripped
state.append((text, cursor_position))
elif len(state) == 2:
# Exactly one space at the cursor. Remove it.
cursor_position -= 1
text = rstripped + lstripped
state.append((text, cursor_position))
elif len(state) == 3:
# Restore original text and cursor position
text, cursor_position = state[0]
state.clear()
if cursor_position < 0:
cursor_position = 0
if cursor_position > len(text):
cursor_position = len(text)
return text, cursor_position
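# Added example (illustrative only): the three-state cycle above first
# collapses whitespace around the cursor to a single space, then removes it,
# then restores the original text, similar to Emacs' M-SPC cycle-spacing.
def _demo_cycle_spacing():  # pragma: no cover
    state = []
    text, pos = do_cycle_spacing('a    b', 3, state)   # -> ('a b', 2)
    text, pos = do_cycle_spacing(text, pos, state)     # -> ('ab', 1)
    text, pos = do_cycle_spacing(text, pos, state)     # -> ('a    b', 3) again
    return text, pos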
# Selection stuff
# The default doesn't toggle correctly
def system_copy(text):
if "Linux" in platform.platform():
copy_command = ['xclip', '-selection', 'c']
else:
copy_command = ['pbcopy']
try:
# In Python 3.6 we can do this:
# run(copy_command, input=text, encoding='utf-8', check=True)
subprocess.run(copy_command, input=text.encode('utf-8'), check=True)
except FileNotFoundError:
print("Error: could not find", copy_command[0], file=sys.stderr)
except subprocess.CalledProcessError as e:
print(copy_command[0], "error:", e, file=sys.stderr)
def system_paste():
if "Linux" in platform.platform():
paste_command = ['xsel', '-b']
else:
paste_command = ['pbpaste']
try:
# In Python 3.6 we can do this:
# run(paste_command, input=text, encoding='utf-8')
p = subprocess.run(paste_command, stdout=subprocess.PIPE, check=True)
except FileNotFoundError:
print("Error: could not find", paste_command[0], file=sys.stderr)
except subprocess.CalledProcessError as e:
print(paste_command[0], "error:", e, file=sys.stderr)
return p.stdout.decode('utf-8')
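# Added usage sketch (requires pbcopy/pbpaste on macOS or xclip/xsel on
# Linux): text pushed through system_copy() should come back unchanged from
# system_paste().
def _demo_clipboard_roundtrip(sample='mypython clipboard test'):  # pragma: no cover
    system_copy(sample)
    return system_paste() == sample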
# M-[ a b is set to C-S-/ (C-?) in iTerm2 settings
Keys.ControlQuestionmark = "<C-?>"
ALL_KEYS.append("<C-?>")
ANSI_SEQUENCES['\x1b[ab'] = Keys.ControlQuestionmark
Keys.ControlSlash = "<C-/>"
ALL_KEYS.append("<C-/>")
ANSI_SEQUENCES['\x1b"5/'] = Keys.ControlSlash
# This won't work until
# https://github.com/jonathanslenders/python-prompt-toolkit/pull/484 is
# merged.
if prompt_toolkit_version[0] != '3':
# Need to escape all spaces here because of verbose (x) option below
ps1_prompts = [r'>>>\ '] + [re.escape(i) + r'\[\d+\]:\ ' for i, j in emoji + [emoji_pudb]] + [r'In\ \[\d+\]:\ ']
ps2_prompts = [r'\ *\.\.\.:\ ?', r'\.\.\.\ ?', '\N{CLAPPING HANDS SIGN}+\\ ?\\ ?']
PS1_PROMPTS_RE = re.compile('|'.join(ps1_prompts))
PS2_PROMPTS_RE = re.compile('|'.join(ps2_prompts))
PROMPTED_TEXT_RE = re.compile(r'''(?x) # Multiline and verbose
(?P<prompt>
(?P<ps1prompt>{PS1_PROMPTS_RE.pattern}) # Match prompts at the front
| (?P<ps2prompt>{PS2_PROMPTS_RE.pattern}))? # of the line.
(?P<noprompt>(?(prompt)\r|))? # If the prompt is not
# matched, this is a special
# marker group that will match
# the empty string.
# Otherwise it will not
# match (because all \r's
# have been stripped from
# the string).
(?P<line>.*)\n # The actual line.
'''.format(PS1_PROMPTS_RE=PS1_PROMPTS_RE, PS2_PROMPTS_RE=PS2_PROMPTS_RE))
def prompt_repl(match):
r"""
repl function for re.sub for clearing prompts
Replaces PS1 prompts with \r and removes PS2 prompts.
"""
# TODO: Remove the lines with no prompt
if match.group('ps1prompt') is not None:
return '\r' + match.group('line') + '\n'
elif match.group('ps2prompt') is not None:
return match.group('line') + '\n'
return ''
def split_prompts(text, indent=''):
r"""
Takes text copied from mypython, Python, or IPython session and returns a
list of inputs
Outputs are stripped. If no prompts are found the text is left alone.
The resulting text is indented by indent, except for the first line.
It is assumed that the text contains no carriage returns (\r).
Trailing whitespace and newlines is stripped from the outputs.
Example:
>>> split_prompts('''
... In [1]: a = 1
...
... In [2]: a
... Out[2]: 1
...
... In [3]: def test():
... ...: pass
... ...:
... ''')
['a = 1', 'a', 'def test():\n pass']
"""
from .mypython import validate_text
text = textwrap.dedent(text).strip() + '\n'
text = textwrap.dedent(PROMPTED_TEXT_RE.sub(prompt_repl, text)).lstrip()
lines = text.split('\r')
# Make sure multilines end in two newlines
for i, line in enumerate(lines):
try:
validate_text(line)
except SyntaxError:
# If there is a syntax error, we can't use the CMD_QUEUE (it
# breaks things).
lines = ['\n'.join(lines)]
break
if '\n' in line.rstrip():
lines[i] += '\n'
lines[0] = textwrap.indent(lines[0], indent,
# Don't indent the first line, it's already indented
lambda line, _x=[]: bool(_x or _x.append(1)))
for i in range(1, len(lines)):
lines[i] = textwrap.indent(lines[i], indent)
# Extraneous newlines at the end will be stripped by the prompt anyway.
# This just makes this function easier to test.
lines = [i.rstrip() for i in lines]
return lines
| 34.344433 | 117 | 0.676988 |
13a5765c2edbddbec6f546bc1dadb0d5693914fe | 10,909 | py | Python | demand/preday_model_estimation/isg.py | gusugusu1018/simmobility-prod | d30a5ba353673f8fd35f4868c26994a0206a40b6 | [
"MIT"
] | 50 | 2018-12-21T08:21:38.000Z | 2022-01-24T09:47:59.000Z | demand/preday_model_estimation/isg.py | gusugusu1018/simmobility-prod | d30a5ba353673f8fd35f4868c26994a0206a40b6 | [
"MIT"
] | 2 | 2018-12-19T13:42:47.000Z | 2019-05-13T04:11:45.000Z | demand/preday_model_estimation/isg.py | gusugusu1018/simmobility-prod | d30a5ba353673f8fd35f4868c26994a0206a40b6 | [
"MIT"
] | 27 | 2018-11-28T07:30:34.000Z | 2022-02-05T02:22:26.000Z | from biogeme import *
from headers import *
from loglikelihood import *
from statistics import *
from nested import *
#import random
cons_work= Beta('cons for work', 0,-10,10,0)
cons_edu = Beta('cons for education',0,-50,10,0)
cons_shopping = Beta('cons for shopping',0,-10,10,0)
cons_other = Beta('cons for other',0,-10,10,0)
cons_Q = Beta('cons for quit',0,-10,10,1)
first_stop_inbound= Beta('dummy for first stop of inbound half tour', 0,-10,10,1)
second_stop_inbound= Beta('dummy for second stop of inbound half tour',0,-10,10,0)
threeplus_stop_inbound=Beta('dummy for 3+ stop of inbound half tour',0,-10,10,0)
first_stop_outbound= Beta('dummy for first stop of outbound half tour', 0,-10,10,0)
second_stop_outbound= Beta('dummy for second stop of outbound half tour',0,-10,10,0)
threeplus_stop_outbound=Beta('dummy for 3+ stop of outbound half tour',0,-10,10,0)
work_tour_dummy_Q=Beta('work tour dummy in quit',0,-10,10,1)
edu_tour_dummy_Q=Beta('edu tour dummy in quit',0,-10,10,1)
shopping_tour_dummy_Q=Beta('shopping tour dummy in quit',0,-10,10,1)
other_tour_dummy_Q=Beta('other tour dummy in quit',0,-10,10,1)
first_tour_dummy_Q=Beta('first tour dummy in quit',0,-10,10,0)
sub_tour_dummy_Q=Beta('has subtour dummy in quit',0,-10,10,0)
zero_tour_remain_Q=Beta('zero tour remain dummy',0,-10,10,1)
one_tour_remain_Q=Beta('one tour remain dummy',0,-10,10,0)
twoplus_tour_remain_Q=Beta('2+ tour remain dummy',0,-10,10,1)
work_tour_dummy_W=Beta('work tour dummy in work',0,-10,10,1)
edu_tour_dummy_W=Beta('edu tour dummy in work',0,-10,10,1)
shopping_tour_dummy_W=Beta('shopping tour dummy in work',0,-10,10,1)
other_tour_dummy_W=Beta('other tour dummy in work',0,-10,10,1)
female_dummy_W=Beta('female dummy in work',0,-10,10,0)
student_dummy_W=Beta('student dummy in work',0,-10,10,1)
worker_dummy_W=Beta('worker dummy in work',0,-10,10,1)
driver_dummy_W=Beta('driver dummy in work',0,-10,10,0)
passenger_dummy_W=Beta('passenger dummy in work',0,-10,10,0)
public_dummy_W=Beta('PT dummy in work',0,-10,10,0)
work_tour_dummy_E=Beta('work tour dummy in edu',0,-10,10,1)
edu_tour_dummy_E=Beta('edu tour dummy in edu',0,-10,10,1)
shopping_tour_dummy_E=Beta('shopping tour dummy in edu',0,-10,10,1)
other_tour_dummy_E=Beta('other tour dummy in edu',0,-10,10,1)
female_dummy_E=Beta('female dummy in edu',0,-10,10,0)
student_dummy_E=Beta('student dummy in edu',0,-10,10,1)
worker_dummy_E=Beta('worker dummy in edu',0,-10,10,1)
driver_dummy_E=Beta('driver dummy in edu',0,-10,10,0)
passenger_dummy_E=Beta('passenger dummy in edu',0,-10,10,0)
public_dummy_E=Beta('PT dummy in edu',0,-10,10,0)
work_tour_dummy_S=Beta('work tour dummy in shopping',0,-10,10,1)
edu_tour_dummy_S=Beta('edu tour dummy in shopping',0,-10,10,1)
shopping_tour_dummy_S=Beta('shopping tour dummy in shopping',0,-10,10,1)
other_tour_dummy_S=Beta('other tour dummy in shopping',0,-10,10,0)
female_dummy_S=Beta('female dummy in shopping',0,-10,10,0)
student_dummy_S=Beta('student dummy in shopping',0,-10,10,1)
worker_dummy_S=Beta('worker dummy in shopping',0,-10,10,0)
driver_dummy_S=Beta('driver dummy in shopping',0,-10,10,0)
passenger_dummy_S=Beta('passenger dummy in shopping',0,-10,10,0)
public_dummy_S=Beta('PT dummy in shopping',0,-10,10,0)
work_tour_dummy_O=Beta('work tour dummy in other',0,-10,10,0)
edu_tour_dummy_O=Beta('edu tour dummy in other',0,-10,10,0)
shopping_tour_dummy_O=Beta('shopping tour dummy in other',0,-10,10,0)
other_tour_dummy_O=Beta('other tour dummy in other',0,-10,10,1)
female_dummy_O=Beta('female dummy in other',0,-10,10,0)
student_dummy_O=Beta('student dummy in other',0,-10,10,0)
worker_dummy_O=Beta('worker dummy in other',0,-10,10,0)
driver_dummy_O=Beta('driver dummy in other',0,-10,10,0)
passenger_dummy_O=Beta('passenger dummy in other',0,-10,10,0)
public_dummy_O=Beta('PT dummy in other',0,-10,10,0)
work_logsum=Beta('work logsum in work',0,-10,10,1)
edu_logsum=Beta('edu logsum in edu',0,-10,10,1)
shop_logsum=Beta('shop logsum in shop',0,-10,10,1)
other_logsum=Beta('other logsum in other',0,-10,10,1)
time_window_work=Beta('time available in work',0,-10,10,1)
time_window_edu= Beta('time available in edu',0,-10,10,1)
time_window_shopping= Beta('time available in shopping',0,-10,10,1)
time_window_other= Beta('time available in other',0,-10,10,1)
tour_distance_work= Beta('log tour distance in work',0,-10,10,0)
tour_distance_edu= Beta('log tour distance in edu',0,-10,10,0)
tour_distance_shopping= Beta('log tour distance in shopping',0,-10,10,0)
tour_distance_other=Beta('log tour distance in other',0,-10,10,0)
a700_a930_work= Beta('period 7am to 9:30am in work',0,-10,10,0)
a930_a1200_work=Beta('period 9:30am to 12pm in work',0,-10,10,0)
p300_p530_work=Beta('period 3pm to 5:30pm in work',0,-10,10,0)
p530_p730_work=Beta('period 5:30pm to 7:30 pm in work',0,-10,10,0)
p730_p1000_work=Beta('period 7:30pm to 10pm in work',0,-10,10,0)
p1000_a700_work=Beta('period 10pm to 7am in work',0,-10,10,0)
a700_a930_edu= Beta('period 7am to 9:30am in edu',0,-10,10,0)
a930_a1200_edu=Beta('period 9:30am to 12pm in edu',0,-10,10,0)
p300_p530_edu=Beta('period 3pm to 5:30pm in edu',0,-10,10,0)
p530_p730_edu=Beta('period 5:30pm to 7:30 pm in edu',0,-10,10,0)
p730_p1000_edu=Beta('period 7:30pm to 10pm in edu',0,-10,10,0)
p1000_a700_edu=Beta('period 10pm to 7am in edu',0,-10,10,0)
a700_a930_shopping= Beta('period 7am to 9:30am in shopping',0,-10,10,0)
a930_a1200_shopping=Beta('period 9:30am to 12pm in shopping',0,-10,10,0)
p300_p530_shopping=Beta('period 3pm to 5:30pm in shopping',0,-10,10,0)
p530_p730_shopping=Beta('period 5:30pm to 7:30 pm in shopping',0,-10,10,0)
p730_p1000_shopping=Beta('period 7:30pm to 10pm in shopping',0,-10,10,0)
p1000_a700_shopping=Beta('period 10pm to 7am in shopping',0,-10,10,0)
a700_a930_other= Beta('period 7am to 9:30am in other',0,-10,10,0)
a930_a1200_other=Beta('period 9:30am to 12pm in other',0,-10,10,0)
p300_p530_other=Beta('period 3pm to 5:30pm in other',0,-10,10,0)
p530_p730_other=Beta('period 5:30pm to 7:30 pm in other',0,-10,10,0)
p730_p1000_other=Beta('period 7:30pm to 10pm in other',0,-10,10,0)
p1000_a700_other=Beta('period 10pm to 7am in other',0,-10,10,0)
MU1 = Beta('MU for quit',1,0,100,1)
MU2 = Beta('MU for non-quit', 1.0,0,100,1)
#V for work
V_work= cons_work+\
work_tour_dummy_W*1*(tour_type==1)+\
edu_tour_dummy_W*1*(tour_type==2)+\
shopping_tour_dummy_W*1*(tour_type==3)+\
other_tour_dummy_W*1*(tour_type==4)+\
female_dummy_W*female_dummy+\
student_dummy_W*student_dummy+\
worker_dummy_W*worker_dummy+\
driver_dummy_W*driver_dummy+\
passenger_dummy_W*passenger_dummy+\
public_dummy_W*public_dummy+\
work_logsum * worklogsum+\
time_window_work*time_window_h+\
tour_distance_work*log(1+distance)+\
a700_a930_work*p_700a_930a+\
a930_a1200_work*p_930a_1200a+\
p300_p530_work*p_300p_530p+\
p530_p730_work*p_530p_730p+\
p730_p1000_work*p_730p_1000p+\
p1000_a700_work*p_1000p_700a
#V for education
V_edu = cons_edu+\
work_tour_dummy_E*1*(tour_type==1)+\
edu_tour_dummy_E*1*(tour_type==2)+\
shopping_tour_dummy_E*1*(tour_type==3)+\
other_tour_dummy_E*1*(tour_type==4)+\
female_dummy_E*female_dummy+\
student_dummy_E*student_dummy+\
worker_dummy_E*worker_dummy+\
driver_dummy_E*driver_dummy+\
passenger_dummy_E*passenger_dummy+\
public_dummy_E*public_dummy+\
edu_logsum * edulogsum+\
time_window_edu*time_window_h+\
tour_distance_edu*log(1+distance)+\
a700_a930_edu*p_700a_930a+\
a930_a1200_edu*p_930a_1200a+\
p300_p530_edu*p_300p_530p+\
p530_p730_edu*p_530p_730p+\
p730_p1000_edu*p_730p_1000p+\
p1000_a700_edu*p_1000p_700a
#V for shopping
V_shopping = cons_shopping+\
work_tour_dummy_S*1*(tour_type==1)+\
edu_tour_dummy_S*1*(tour_type==2)+\
shopping_tour_dummy_S*1*(tour_type==3)+\
other_tour_dummy_S*1*(tour_type==4)+\
female_dummy_S*female_dummy+\
student_dummy_S*student_dummy+\
worker_dummy_S*worker_dummy+\
driver_dummy_S*driver_dummy+\
passenger_dummy_S*passenger_dummy+\
public_dummy_S*public_dummy+\
shop_logsum * shoplogsum+\
time_window_shopping*time_window_h+\
tour_distance_shopping*log(1+distance)+\
a700_a930_shopping*p_700a_930a+\
a930_a1200_shopping*p_930a_1200a+\
p300_p530_shopping*p_300p_530p+\
p530_p730_shopping*p_530p_730p+\
p730_p1000_shopping*p_730p_1000p+\
p1000_a700_shopping*p_1000p_700a
#V for other
V_other=cons_other+\
work_tour_dummy_O*1*(tour_type==1)+\
edu_tour_dummy_O*1*(tour_type==2)+\
shopping_tour_dummy_O*1*(tour_type==3)+\
other_tour_dummy_O*1*(tour_type==4)+\
female_dummy_O*female_dummy+\
student_dummy_O*student_dummy+\
worker_dummy_O*worker_dummy+\
driver_dummy_O*driver_dummy+\
passenger_dummy_O*passenger_dummy+\
public_dummy_O*public_dummy+\
other_logsum * otherlogsum+\
time_window_other*time_window_h+\
tour_distance_other*log(1+distance)+\
a700_a930_other*p_700a_930a+\
a930_a1200_other*p_930a_1200a+\
p300_p530_other*p_300p_530p+\
p530_p730_other*p_530p_730p+\
p730_p1000_other*p_730p_1000p+\
p1000_a700_other*p_1000p_700a
#V for quit
V_quit= cons_Q+first_stop_inbound*first_stop*first_bound+\
second_stop_inbound*second_stop*first_bound+\
threeplus_stop_inbound*three_plus_stop*first_bound+\
first_stop_outbound*first_stop*second_bound+\
second_stop_outbound*second_stop*second_bound+\
threeplus_stop_outbound*three_plus_stop*second_bound+\
work_tour_dummy_Q*1*(tour_type==1)+\
edu_tour_dummy_Q*1*(tour_type==2)+\
shopping_tour_dummy_Q*1*(tour_type==3)+\
other_tour_dummy_Q*1*(tour_type==4)+\
first_tour_dummy_Q*first_tour_dummy+\
sub_tour_dummy_Q*has_subtour+zero_tour_remain_Q*1*(tour_remain==0)+\
one_tour_remain_Q*1*(tour_remain==1)+twoplus_tour_remain_Q*1*(tour_remain>=2)
V = {0:V_quit,1: V_work,2:V_edu,3:V_shopping,4:V_other}
av= {0:avail_quit,1:avail_workstop,2:avail_edustop,3:avail_shopstop,4:avail_otherstop}
nest_quit = MU1 , [0]
nest_nonquit = MU2 , [1,2,3,4]
nests=nest_quit,nest_nonquit
prob = nested(V,av,nests,stop_type)
#prob = bioLogit(V,av,stop_type)
rowIterator('obsIter')
BIOGEME_OBJECT.ESTIMATE = Sum(log(prob),'obsIter')
exclude = ((avail_violation==1)+(origin_mtz==0)+(destination_mtz==0)+(time_window_h>=10)) > 0
BIOGEME_OBJECT.EXCLUDE = exclude
nullLoglikelihood(av,'obsIter')
choiceSet = [0,1,2,3,4]
cteLoglikelihood(choiceSet,stop_type,'obsIter')
availabilityStatistics(av,'obsIter')
BIOGEME_OBJECT.PARAMETERS['optimizationAlgorithm'] = "CFSQP"
BIOGEME_OBJECT.PARAMETERS['checkDerivatives'] = "1"
BIOGEME_OBJECT.PARAMETERS['numberOfThreads'] = "6" | 26.098086 | 94 | 0.735356 |
13a612e58dbc2d528c1a7b3b41902e48fed28775 | 739 | py | Python | HRMS/app/__init__.py | freestyletime/HumanResourceManagement | 4ec7f453fdae28d1a412d740849c9ee186757df8 | [
"MIT"
] | 1 | 2021-12-24T18:47:20.000Z | 2021-12-24T18:47:20.000Z | HRMS/app/__init__.py | freestyletime/HumanResourceManagement | 4ec7f453fdae28d1a412d740849c9ee186757df8 | [
"MIT"
] | null | null | null | HRMS/app/__init__.py | freestyletime/HumanResourceManagement | 4ec7f453fdae28d1a412d740849c9ee186757df8 | [
"MIT"
] | null | null | null | #
from config import Config
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
#
db = SQLAlchemy()
# app
| 19.972973 | 39 | 0.705007 |
13a7639121bf30f43e2268fe97c114969714293e | 1,225 | py | Python | listener/src/ethereum_connection.py | NicolasMenendez/oracles-dashboard | 789e4a771c9f7064a19a85ef1b4f44bcbbac1a10 | [
"MIT"
] | null | null | null | listener/src/ethereum_connection.py | NicolasMenendez/oracles-dashboard | 789e4a771c9f7064a19a85ef1b4f44bcbbac1a10 | [
"MIT"
] | null | null | null | listener/src/ethereum_connection.py | NicolasMenendez/oracles-dashboard | 789e4a771c9f7064a19a85ef1b4f44bcbbac1a10 | [
"MIT"
] | 1 | 2020-01-17T12:38:39.000Z | 2020-01-17T12:38:39.000Z | import json
import web3
| 22.685185 | 86 | 0.646531 |
13a86b3246874d1785eb144fa791b1d302c19c30 | 60,991 | py | Python | ross/stochastic/st_results.py | JuliaMota/ross | 88c2fa69d9a583dcdc33eab8deb35c797ebf4ef8 | [
"MIT"
] | null | null | null | ross/stochastic/st_results.py | JuliaMota/ross | 88c2fa69d9a583dcdc33eab8deb35c797ebf4ef8 | [
"MIT"
] | null | null | null | ross/stochastic/st_results.py | JuliaMota/ross | 88c2fa69d9a583dcdc33eab8deb35c797ebf4ef8 | [
"MIT"
] | null | null | null | """STOCHASTIC ROSS plotting module.
This module returns graphs for each type of analyses in st_rotor_assembly.py.
"""
import numpy as np
from plotly import express as px
from plotly import graph_objects as go
from plotly import io as pio
from plotly.subplots import make_subplots
from ross.plotly_theme import tableau_colors
pio.renderers.default = "browser"
# set Plotly palette of colors
colors1 = px.colors.qualitative.Dark24
colors2 = px.colors.qualitative.Light24
| 36.763713 | 92 | 0.480202 |
13a8c046daa5c36fb60a09676a0be10c4e57fb9f | 4,227 | py | Python | code/prisonersDilemma.py | ben9583/PrisonersDilemmaTournament | 8227c05f835c93a0b30feb4207a7d7c631e670a0 | [
"MIT"
] | 1 | 2021-09-16T03:38:21.000Z | 2021-09-16T03:38:21.000Z | code/prisonersDilemma.py | ben9583/PrisonersDilemmaTournament | 8227c05f835c93a0b30feb4207a7d7c631e670a0 | [
"MIT"
] | null | null | null | code/prisonersDilemma.py | ben9583/PrisonersDilemmaTournament | 8227c05f835c93a0b30feb4207a7d7c631e670a0 | [
"MIT"
] | null | null | null | import os
import itertools
import importlib
import numpy as np
import random
STRATEGY_FOLDER = "exampleStrats"
RESULTS_FILE = "results.txt"
pointsArray = [[1,5],[0,3]] # The i-j-th element of this array is how many points you receive if you do play i, and your opponent does play j.
moveLabels = ["D","C"]
# D = defect, betray, sabotage, free-ride, etc.
# C = cooperate, stay silent, comply, upload files, etc.
# Returns a 2-by-n numpy array. The first axis is which player (0 = us, 1 = opponent)
# The second axis is which turn. (0 = first turn, 1 = next turn, etc.)
# For example, it might return
#
# [[0 0 1] a.k.a. D D C
# [1 1 1]] a.k.a. C C C
#
# if there have been 3 turns, and we have defected twice then cooperated once,
# and our opponent has cooperated all three times.
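# Added illustrative strategy (the exact entry-point signature expected by the
# tournament driver is not shown in this excerpt, so strategy(history, memory)
# returning (move, memory) is an assumption). It reads the history layout
# described above: row 0 holds our moves, row 1 the opponent's, 0 = defect,
# 1 = cooperate, and pointsArray[myMove][theirMove] scores a single turn.
def exampleTitForTat(history, memory):  # pragma: no cover - example only
    if history.shape[1] == 0:
        return 1, None            # cooperate on the very first turn
    return history[1, -1], None   # otherwise copy the opponent's last move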
runFullPairingTournament(STRATEGY_FOLDER, RESULTS_FILE)
| 37.078947 | 210 | 0.62503 |
13a92427a8cdec440aec42402a7483f2303b73a6 | 10,075 | py | Python | json_to_relation/mysqldb.py | paepcke/json_to_relation | acfa58d540f8f51d1d913d0c173ee3ded1b6c2a9 | [
"BSD-3-Clause"
] | 4 | 2015-10-10T19:09:49.000Z | 2021-09-02T00:58:06.000Z | json_to_relation/mysqldb.py | paepcke/json_to_relation | acfa58d540f8f51d1d913d0c173ee3ded1b6c2a9 | [
"BSD-3-Clause"
] | null | null | null | json_to_relation/mysqldb.py | paepcke/json_to_relation | acfa58d540f8f51d1d913d0c173ee3ded1b6c2a9 | [
"BSD-3-Clause"
] | 8 | 2015-05-16T14:33:33.000Z | 2019-10-24T08:56:25.000Z | # Copyright (c) 2014, Stanford University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Created on Sep 24, 2013
@author: paepcke
Modifications:
- Dec 30, 2013: Added closing of connection to close() method
'''
import re
import subprocess
import tempfile
import pymysql
#import MySQLdb
| 40.461847 | 757 | 0.60794 |
13a95b957fe3881893fa91e63fed84b8224215f9 | 611 | py | Python | tools/xkeydump.py | treys/crypto-key-derivation | 789900bd73160db9a0d406c7c7f00f5f299aff73 | [
"MIT"
] | 29 | 2017-11-12T08:54:03.000Z | 2022-03-04T21:12:00.000Z | tools/xkeydump.py | treys/crypto-key-derivation | 789900bd73160db9a0d406c7c7f00f5f299aff73 | [
"MIT"
] | 2 | 2019-03-01T05:56:52.000Z | 2021-05-17T00:18:01.000Z | tools/xkeydump.py | treys/crypto-key-derivation | 789900bd73160db9a0d406c7c7f00f5f299aff73 | [
"MIT"
] | 9 | 2018-04-10T08:40:25.000Z | 2021-12-29T16:04:48.000Z | #!./venv/bin/python
from lib.mbp32 import XKey
from lib.utils import one_line_from_stdin
xkey = XKey.from_xkey(one_line_from_stdin())
print(xkey)
print("Version:", xkey.version)
print("Depth:", xkey.depth)
print("Parent FP:", xkey.parent_fp.hex())
print("Child number:", xkey.child_number_with_tick())
print("Chain code:", xkey.chain_code.hex())
print("Key:", xkey.key)
if xkey.key.get_private_bytes():
print("Private bytes:", xkey.key.get_private_bytes().hex())
print("Public bytes:", xkey.key.get_public_bytes().hex())
print("Key ID:", xkey.keyid().hex())
print("XKey:", xkey.to_xkey().decode('ascii'))
| 32.157895 | 63 | 0.721768 |
13aa5a46812a4881dac4a4f78ba8019d3b73841d | 616 | py | Python | examples/compute_angular_resolution.py | meder411/Tangent-Images | 6def4d7b8797110e54f7faa2435973771d9e9722 | [
"BSD-3-Clause"
] | 57 | 2019-12-20T09:28:29.000Z | 2022-03-28T02:38:48.000Z | examples/compute_angular_resolution.py | meder411/Tangent-Images | 6def4d7b8797110e54f7faa2435973771d9e9722 | [
"BSD-3-Clause"
] | 6 | 2020-06-06T16:39:35.000Z | 2021-01-21T01:19:52.000Z | examples/compute_angular_resolution.py | meder411/Tangent-Images | 6def4d7b8797110e54f7faa2435973771d9e9722 | [
"BSD-3-Clause"
] | 16 | 2019-12-21T08:19:33.000Z | 2022-03-28T02:38:49.000Z | from spherical_distortion.util import *
sample_order = 9 # Input resolution to examine
ang_fov(sample_order) | 36.235294 | 77 | 0.625 |
13abd087d6a8034a9d1a9da08f2dab574fb7be66 | 51,194 | py | Python | polymath/srdfg/base.py | he-actlab/polymath | 9b7937d0ddf7452f6cc74ee90d05f8c6acef737e | [
"Apache-2.0"
] | 15 | 2021-05-09T05:46:04.000Z | 2022-03-06T20:46:32.000Z | polymath/srdfg/base.py | he-actlab/polymath | 9b7937d0ddf7452f6cc74ee90d05f8c6acef737e | [
"Apache-2.0"
] | null | null | null | polymath/srdfg/base.py | he-actlab/polymath | 9b7937d0ddf7452f6cc74ee90d05f8c6acef737e | [
"Apache-2.0"
] | 4 | 2021-08-24T07:46:29.000Z | 2022-03-05T18:23:07.000Z |
from polymath import UNSET_SHAPE, DEFAULT_SHAPES
import builtins
import operator
from collections import OrderedDict, Mapping, Sequence, deque
import functools
from numbers import Integral, Rational, Real
import contextlib
import traceback
import uuid
import numpy as np
import importlib
from .graph import Graph
from .domain import Domain
from .util import _noop_callback, _flatten_iterable, node_hash, \
_is_node_type_instance, is_iterable
def instantiate_node(self, node): # pylint:disable=W0621
"""
Instantiate nodes by retrieving the node object associated with the node name.
Parameters
----------
node : Node or str
Node instance or name of an node.
Returns
-------
instantiated_node : Node
Node instance.
Raises
------
ValueError
If `node` is not an `Node` instance or an node name.
RuntimeError
If `node` is an `Node` instance but does not belong to this graph.
"""
if isinstance(node, str):
return self.nodes[node]
if isinstance(node, Node):
if node.name not in self.nodes and (node.graph != self):
raise RuntimeError(f"node '{node}' does not belong to {self} graph, instead belongs to"
f" {node.graph}")
return node
raise ValueError(f"'{node}' is not an `Node` instance or node name")
def instantiate_graph(self, context, **kwargs):
"""
Instantiate a graph by replacing all node names with node instances.
.. note::
This function modifies the context in place. Use :code:`context=context.copy()` to avoid
the context being modified.
Parameters
----------
context : dict[Node or str, object]
Context whose keys are node instances or names.
kwargs : dict[str, object]
Additional context information keyed by variable name.
Returns
-------
normalized_context : dict[Node, object]
Normalized context whose keys are node instances.
Raises
------
ValueError
If the context specifies more than one value for any node.
ValueError
If `context` is not a mapping.
"""
if context is None:
context = {}
elif not isinstance(context, Mapping):
raise ValueError("`context` must be a mapping.")
nodes = list(context)
# Add the keyword arguments
for node in nodes: # pylint:disable=W0621
value = context.pop(node)
node = self.instantiate_node(node)
if node in context:
raise ValueError(f"duplicate unequal value for node '{node}'")
context[node] = value
if node.op_name in ["placeholder", "state", "input", "output", "temp"] and not node.is_shape_finalized():
context[node] = node.evaluate(context)
for name, value in kwargs.items():
node = self.nodes[name]
if node in context:
raise ValueError(f"duplicate value for node '{node}'")
context[node] = value
if node.op_name in ["placeholder", "state", "input", "output", "temp"] and not node.is_shape_finalized():
context[node] = node.evaluate(context)
return context
def run(self, fetches, context=None, *, callback=None, **kwargs):
"""
Evaluate one or more nodes given a dictionary of node names with their values.
.. note::
This function modifies the context in place. Use :code:`context=context.copy()` to avoid
the context being modified.
Parameters
----------
fetches : list[str or Node] or str or Node
One or more `Node` instances or names to evaluate.
context : dict or None
Context in which to evaluate the nodes.
callback : callable or None
Callback to be evaluated when an node is evaluated.
kwargs : dict
Additional context information keyed by variable name.
Returns
-------
values : Node or tuple[object]
Output of the nodes given the context.
Raises
------
ValueError
If `fetches` is not an `Node` instance, node name, or a sequence thereof.
"""
if isinstance(fetches, (str, Node)):
fetches = [fetches]
single = True
elif isinstance(fetches, Sequence):
single = False
else:
raise ValueError("`fetches` must be an `Node` instance, node name, or a "
"sequence thereof.")
fetches = [self.instantiate_node(node) for node in fetches]
context = self.instantiate_graph(context, **kwargs)
for c in context:
if c in fetches and c.op_name in ["output", "state", "temp"]:
write_name = "/".join([f"{i}{c.write_count-1}" for i in c.name.split("/")]) if c.write_count > 0 else c.name
fetches[fetches.index(c)] = c.graph.nodes[write_name]
values = [fetch.evaluate_node(fetch, context, callback=callback) for fetch in fetches]
return values[0] if single else tuple(values)
def set_name(self, name):
"""
Set the name of the node and update the graph.
Parameters
----------
value : str
Unique name of the node.
Returns
-------
self : Node
This node.
Raises
------
ValueError
If an node with `value` already exists in the associated graph.
KeyError
If the current name of the node cannot be found in the associated graph.
"""
name = name or uuid.uuid4().hex
# TODO: Need a way to check if the existing node is not equal to the current ndoe as ewll
if self.graph and name in self.graph.nodes:
raise ValueError(f"duplicate name '{name}' in {self.graph.name}:\n\t"
f"Existing: {self.graph.nodes[name].args}\n\t"
f"New: {self.args}")
if self.graph:
graph = self.graph
if self._name and self._name in graph.nodes:
graph.update_graph_key(self._name, name)
else:
graph.nodes[name] = self
self._name = name
return self
def evaluate_dependencies(self, context, callback=None):
"""
Evaluate the dependencies of this node and discard the values.
Parameters
----------
context : dict
Normalised context in which to evaluate the node.
callback : callable or None
            Callback to be evaluated when a node is evaluated.
"""
for node in self.dependencies:
node.evaluate(context, callback)
def evaluate(self, context, callback=None):
"""
Evaluate the node given a context.
Parameters
----------
context : dict
Normalised context in which to evaluate the node.
callback : callable or None
            Callback to be evaluated when a node is evaluated.
Returns
-------
value : object
Output of the node given the context.
"""
# Evaluate all explicit dependencies first
self.evaluate_dependencies(context, callback)
if self in context:
return context[self]
# Evaluate the parents
partial = functools.partial(self.evaluate_node, context=context, callback=callback)
args = [partial(arg) for arg in self.args]
kwargs = {key: partial(value) for key, value in self.kwargs.items() if key not in self.added_attrs}
# Evaluate the node
callback = callback or _noop_callback
with callback(self, context):
if self.__class__.__name__ == "Node":
context[self] = self.value = self._evaluate(*args, context=context, **kwargs)
else:
context[self] = self.value = self._evaluate(*args, **kwargs)
return self.value
def _evaluate(self, *args, context=None, **kwargs):
"""
Inheriting nodes should implement this function to evaluate the node.
"""
return self(*args, context, **kwargs)
def __bool__(self):
return True
def __hash__(self):
return id(self)
def func_hash(self):
"""
This returns the functional hash of a particular node. The default hash returns an object id, whereas this function
returns a hash of all attributes and subgraphs of a node.
"""
return node_hash(self)
def find_node(self, name):
g = self.graph
while g is not None and name not in g.nodes:
g = g.graph
if name in g.nodes:
return g.nodes[name]
raise RuntimeError(f"Cannot find {name} in graph nodes. Graph: {self.graph}")
def __len__(self):
        # TODO: Update this to check for finalized shape
if self.shape == UNSET_SHAPE:
raise TypeError(f'`shape` must be specified explicitly for nodes {self}')
return self.shape[0]
def __iter__(self):
num = len(self)
for i in range(num):
yield self[i]
class EvaluationError(RuntimeError):
"""
    Failed to evaluate a node.
"""
class var_index(Node): # pylint: disable=C0103,W0223
"""
Node representing values of a variable corresponding to input index values.
Parameters
----------
var : Node
The multi-dimensional variable used for indexing into.
idx : tuple
Tuple of either integer values or index/index_op nodes.
"""
def set_name(self, name):
"""
        Set the name for a variable index, making sure to replicate the new name with
        a unique string which corresponds to the variable/index combination.
Parameters
----------
value : str
Unique name of the node.
Returns
-------
self : Node
This node.
Raises
------
ValueError
If an node with `value` already exists in the associated graph.
KeyError
If the current name of the node cannot be found in the associated graph.
"""
        # TODO: Need a way to check if the existing node is not equal to the current node as well
if self.graph and name in self.graph.nodes:
raise ValueError(f"duplicate name '{name}' in {self.graph.name}:"
f"Existing: {self.graph.nodes[name].args}\n"
f"New: {self.args}")
if self.graph:
graph = self.graph
if self._name is not None and self._name in graph.nodes:
graph.update_graph_key(self._name, name)
else:
graph.nodes[name] = self
self._name = name
return self
def __getitem__(self, key):
if self.is_shape_finalized() and len(self.nodes) >= np.prod(self.shape):
if isinstance(key, Integral):
key = tuple([key])
idx = np.ravel_multi_index(key, dims=self.shape, order='C')
ret = self.nodes.item_by_index(idx)
return ret
else:
if isinstance(key, (list)):
ret = var_index(self.var, tuple(key), graph=self)
elif isinstance(key, tuple):
ret = var_index(self.var, key, graph=self)
else:
ret = var_index(self.var, tuple([key]), graph=self)
return ret
def is_scalar(self, val=None):
if val is not None and (not isinstance(val, np.ndarray) or (len(val.shape) == 1 and val.shape[0] == 1)):
if self.var.shape != DEFAULT_SHAPES[0] and (len(self.var.shape) == 1 and not isinstance(self.var.shape[0],Node)):
raise ValueError(f"Invalid shape var for var index {self} with variable shape {self.var.shape}")
return True
else:
return self.var.shape == DEFAULT_SHAPES[0]
def _evaluate(self, var, indices, **kwargs):
if self.is_scalar(var):
out_shape = (1,)
indices = (0,)
single = True
else:
out_shape = self.domain.shape_from_indices(indices)
indices = self.domain.compute_pairs()
single = False
if isinstance(var, (Integral, Real, str)):
var = np.asarray([var])
elif not isinstance(var, (np.ndarray, list)):
raise TypeError(f"Variable {var} with type {type(var)} is not a list or numpy array, and cannot be sliced for {self.name}")
elif isinstance(var, list):
var = np.asarray(var)
if len(var.shape) != len(out_shape) and np.prod(var.shape) == np.prod(out_shape):
if len(out_shape) > len(var.shape):
for i in range(len(out_shape)):
if out_shape[i] == 1:
var = np.expand_dims(var, axis=i)
else:
var = np.squeeze(var)
if len(var.shape) != len(out_shape) and np.prod(var.shape) != np.prod(out_shape):
raise ValueError(f"Index list does not match {var.shape} in {self.var.name} - {self.var.op_name}"
f"dimensions for slice {self.args[0].name} with {out_shape}.\n"
f"Domain: {self.domain}\n"
f"Eval Stack: {Node._eval_stack}")
if not single and not all([(idx_val - 1) >= indices[-1][idx] for idx, idx_val in enumerate(var.shape)]):
raise ValueError(f"var_index {self.name} has indices which are greater than the variable shape:\n"
f"\tArgs: {self.args}\n"
f"\tVar shape: {var.shape}\n"
f"\tNode shape: {self.var.shape}\n"
f"\tIndex Upper bounds: {indices[-1]}")
indices = list(map(lambda x: x.tolist() if isinstance(x, np.ndarray) else x, indices))
res = var[indices] if single else np.asarray([var[idx] for idx in indices]).reshape(out_shape)
if out_shape == (1,) and len(indices) == 1:
res = res[0]
self.domain.set_computed(out_shape, indices)
return res
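    # Editorial sketch (assumed example, not original code): a `var_index` is what
    # indexing a node produces (see `__getitem__` above). Conceptually, if the
    # backing variable evaluates to
    #
    #     v = np.arange(12).reshape(3, 4)
    #
    # then a var_index over it with indices (1, 2) evaluates to v[1, 2] == 6, and
    # index ranges yield the corresponding slice reshaped via the node's domain.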
def __add__(self, other):
return slice_op(operator.add, self, other, graph=self.graph)
def __radd__(self, other):
return slice_op(operator.add, other, self, graph=self.graph)
def __sub__(self, other):
return slice_op(operator.sub, self, other, graph=self.graph)
def __rsub__(self, other):
return slice_op(operator.sub, other, self, graph=self.graph)
def __pow__(self, other):
return slice_op(builtins.pow, self, other, graph=self.graph)
def __rpow__(self, other):
return slice_op(builtins.pow, other, self, graph=self.graph)
def __mul__(self, other):
return slice_op(operator.mul, self, other, graph=self.graph)
def __rmul__(self, other):
return slice_op(operator.mul, other, self, graph=self.graph)
def __truediv__(self, other):
return slice_op(operator.truediv, self, other, graph=self.graph)
def __rtruediv__(self, other):
return slice_op(operator.truediv, other, self, graph=self.graph)
def __floordiv__(self, other):
return slice_op(operator.floordiv, self, other, graph=self.graph)
def __rfloordiv__(self, other):
return slice_op(operator.floordiv, other, self, graph=self.graph)
def __mod__(self, other):
return slice_op(operator.mod, self, other, graph=self.graph)
def __rmod__(self, other):
return slice_op(operator.mod, other, self, graph=self.graph)
def __lshift__(self, other):
return slice_op(operator.lshift, self, other, graph=self.graph)
def __rlshift__(self, other):
return slice_op(operator.lshift, other, self, graph=self.graph)
def __rshift__(self, other):
return slice_op(operator.rshift, self, other, graph=self.graph)
def __rrshift__(self, other):
return slice_op(operator.rshift, other, self, graph=self.graph)
def __and__(self, other):
return slice_op(operator.and_, self, other, graph=self.graph)
def __rand__(self, other):
return slice_op(operator.and_, other, self, graph=self.graph)
def __or__(self, other):
return slice_op(operator.or_, self, other, graph=self.graph)
def __ror__(self, other):
return slice_op(operator.or_, other, self, graph=self.graph)
def __xor__(self, other):
return slice_op(operator.xor, self, other, graph=self.graph)
def __rxor__(self, other):
return slice_op(operator.xor, other, self, graph=self.graph)
def __lt__(self, other):
return slice_op(operator.lt, self, other, graph=self.graph)
def __le__(self, other):
        return slice_op(operator.le, self, other, graph=self.graph)
def __ne__(self, other):
return slice_op(operator.ne, self, other, graph=self.graph)
def __gt__(self, other):
return slice_op(operator.gt, self, other, graph=self.graph)
def __ge__(self, other):
return slice_op(operator.ge, self, other, graph=self.graph)
def __repr__(self):
return "<var_index name=%s, index=%s>" % (self.name, self.args)
class slice_op(Node):
"""
Node representing multi-dimensional operations performed on a node.
Parameters
----------
    target : callable
        The operation that is applied across the node's multi-dimensional domain.
    args : tuple
        Operand nodes or values that the operation is applied to.
"""
class func_op(Node): # pylint: disable=C0103,R0903
"""
Node wrapper for stateless functions.
Parameters
----------
target : callable
function to evaluate the node
args : tuple
positional arguments passed to the target
kwargs : dict
        keyword arguments passed to the target
"""
def __getitem__(self, key):
return self
def nodeop(target=None, **kwargs):
"""
Decorator for creating nodes from functions.
"""
# This is called when the decorator is used with arguments
if target is None:
return functools.partial(nodeop, **kwargs)
# This is called when the decorator is used without arguments
return _wrapper
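# Editorial sketch (not part of the original module): `nodeop` turns a plain callable
# into a factory for lazily evaluated nodes. Assuming the surrounding graph machinery,
# it behaves roughly like:
#
#     @nodeop
#     def scale(x, factor=2):
#         return x * factor
#
#     node = scale(3)        # builds a func_op node instead of calling immediately
#     node.evaluate({})      # -> 6 once the node (and its arguments) are evaluated
#
# The pre-wrapped builtins and operators below (`add`, `len_`, `max_`, ...) apply the
# same idea to the standard library.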
#pylint: disable=C0103
abs_ = nodeop(builtins.abs)
dict_ = nodeop(builtins.dict)
help_ = nodeop(builtins.help)
min_ = nodeop(builtins.min)
setattr_ = nodeop(builtins.setattr)
all_ = nodeop(builtins.all)
dir_ = nodeop(builtins.dir)
hex_ = nodeop(builtins.hex)
next_ = nodeop(builtins.next)
slice_ = nodeop(builtins.slice)
any_ = nodeop(builtins.any)
divmod_ = nodeop(builtins.divmod)
id_ = nodeop(builtins.id)
object_ = nodeop(builtins.object)
sorted_ = nodeop(builtins.sorted)
ascii_ = nodeop(builtins.ascii)
enumerate_ = nodeop(builtins.enumerate)
input_ = nodeop(builtins.input)
oct_ = nodeop(builtins.oct)
staticmethod_ = nodeop(builtins.staticmethod)
bin_ = nodeop(builtins.bin)
eval_ = nodeop(builtins.eval)
int_ = nodeop(builtins.int)
open_ = nodeop(builtins.open)
str_ = nodeop(builtins.str)
bool_ = nodeop(builtins.bool)
exec_ = nodeop(builtins.exec)
isinstance_ = nodeop(builtins.isinstance)
ord_ = nodeop(builtins.ord)
sum_ = nodeop(builtins.sum)
bytearray_ = nodeop(builtins.bytearray)
filter_ = nodeop(builtins.filter)
issubclass_ = nodeop(builtins.issubclass)
pow_ = nodeop(builtins.pow)
super_ = nodeop(builtins.super)
bytes_ = nodeop(builtins.bytes)
float_ = nodeop(builtins.float)
iter_ = nodeop(builtins.iter)
print_ = nodeop(builtins.print)
tuple_ = nodeop(builtins.tuple)
callable_ = nodeop(builtins.callable)
format_ = nodeop(builtins.format)
len_ = nodeop(builtins.len)
property_ = nodeop(builtins.property)
type_ = nodeop(builtins.type)
chr_ = nodeop(builtins.chr)
frozenset_ = nodeop(builtins.frozenset)
list_ = nodeop(builtins.list)
range_ = nodeop(builtins.range)
vars_ = nodeop(builtins.vars)
classmethod_ = nodeop(builtins.classmethod)
getattr_ = nodeop(builtins.getattr)
locals_ = nodeop(builtins.locals)
repr_ = nodeop(builtins.repr)
zip_ = nodeop(builtins.zip)
compile_ = nodeop(builtins.compile)
globals_ = nodeop(builtins.globals)
map_ = nodeop(builtins.map)
reversed_ = nodeop(builtins.reversed)
complex_ = nodeop(builtins.complex)
hasattr_ = nodeop(builtins.hasattr)
max_ = nodeop(builtins.max)
round_ = nodeop(builtins.round)
delattr_ = nodeop(builtins.delattr)
hash_ = nodeop(builtins.hash)
memoryview_ = nodeop(builtins.memoryview)
set_ = nodeop(builtins.set)
add = nodeop(operator.add)
and_ = nodeop(operator.and_)
attrgetter = nodeop(operator.attrgetter)
concat = nodeop(operator.concat)
contains = nodeop(operator.contains)
countOf = nodeop(operator.countOf)
delitem = nodeop(operator.delitem)
eq = nodeop(operator.eq)
floordiv = nodeop(operator.floordiv)
ge = nodeop(operator.ge)
getitem = nodeop(operator.getitem)
gt = nodeop(operator.gt)
index = nodeop(operator.index)
indexOf = nodeop(operator.indexOf)
inv = nodeop(operator.inv)
invert = nodeop(operator.invert)
ior = nodeop(operator.ior)
ipow = nodeop(operator.ipow)
irshift = nodeop(operator.irshift)
is_ = nodeop(operator.is_)
is_not = nodeop(operator.is_not)
itemgetter = nodeop(operator.itemgetter)
le = nodeop(operator.le)
length_hint = nodeop(operator.length_hint)
lshift = nodeop(operator.lshift)
lt = nodeop(operator.lt)
matmul = nodeop(operator.matmul)
methodcaller = nodeop(operator.methodcaller)
mod = nodeop(operator.mod)
mul = nodeop(operator.mul)
ne = nodeop(operator.ne)
neg = nodeop(operator.neg)
not_ = nodeop(operator.not_)
or_ = nodeop(operator.or_)
pos = nodeop(operator.pos)
rshift = nodeop(operator.rshift)
setitem = nodeop(operator.setitem)
sub = nodeop(operator.sub)
truediv = nodeop(operator.truediv)
truth = nodeop(operator.truth)
xor = nodeop(operator.xor)
import_ = nodeop(importlib.import_module)
| 35.725052 | 158 | 0.607239 |
13adaa6947a03ff103153b1b8c14da5ac9aaa4b0 | 392 | py | Python | actors/models.py | rngallen/beyond_basics | 2cfb7d97699a733251e68357a70eada3d0278680 | [
"MIT"
] | null | null | null | actors/models.py | rngallen/beyond_basics | 2cfb7d97699a733251e68357a70eada3d0278680 | [
"MIT"
] | null | null | null | actors/models.py | rngallen/beyond_basics | 2cfb7d97699a733251e68357a70eada3d0278680 | [
"MIT"
] | null | null | null | from django.db import models
from django.utils.translation import ugettext_lazy as _
# Create your models here.
| 28 | 81 | 0.729592 |
13aef1dee9a4b31316309780ab660b17ad23d9b2 | 1,285 | py | Python | docs/buildscripts/docs.py | cwlalyy/mongo-c-driver | d771be13bc8f7d8b84d233de6fdc725d9bb337cc | [
"Apache-2.0"
] | 13 | 2016-07-14T16:36:59.000Z | 2018-06-01T18:06:14.000Z | docs/buildscripts/docs.py | cwlalyy/mongo-c-driver | d771be13bc8f7d8b84d233de6fdc725d9bb337cc | [
"Apache-2.0"
] | null | null | null | docs/buildscripts/docs.py | cwlalyy/mongo-c-driver | d771be13bc8f7d8b84d233de6fdc725d9bb337cc | [
"Apache-2.0"
] | 9 | 2015-01-26T09:30:41.000Z | 2016-03-15T14:48:18.000Z | """Build the C client docs.
"""
from __future__ import with_statement
import os
import shutil
import socket
import subprocess
import time
import urllib2
def version():
"""Get the driver version from doxygenConfig.
"""
with open("doxygenConfig") as f:
for line in f.readlines():
if line.startswith("PROJECT_NUMBER"):
return line.split("=")[1].strip()
if __name__ == "__main__":
main()
| 22.155172 | 79 | 0.636576 |
13afb94123467877efe58458ed4c502384fb753b | 1,921 | py | Python | tilegame/render/rs.py | defgsus/thegame | 38a627d9108f1418b94b08831fd640dd87fbba83 | [
"MIT"
] | 1 | 2021-11-05T11:49:26.000Z | 2021-11-05T11:49:26.000Z | tilegame/render/rs.py | defgsus/thegame | 38a627d9108f1418b94b08831fd640dd87fbba83 | [
"MIT"
] | null | null | null | tilegame/render/rs.py | defgsus/thegame | 38a627d9108f1418b94b08831fd640dd87fbba83 | [
"MIT"
] | null | null | null | import glm
import math
from lib.opengl import RenderSettings
| 26.315068 | 90 | 0.563248 |
13afbf984a23501bfd2bc3f3a3b28ed6375a3779 | 4,865 | py | Python | tools/stats/export_slow_tests.py | stungkit/pytorch | 0f05e398705bf15406bce79f7ee57d3935ad2abd | [
"Intel"
] | 2 | 2020-03-13T06:57:49.000Z | 2020-05-17T04:18:14.000Z | tools/stats/export_slow_tests.py | ellhe-blaster/pytorch | e5282c3cb8bf6ad8c5161f9d0cc271edb9abed25 | [
"Intel"
] | 1 | 2022-01-10T18:39:28.000Z | 2022-01-10T19:15:57.000Z | tools/stats/export_slow_tests.py | ellhe-blaster/pytorch | e5282c3cb8bf6ad8c5161f9d0cc271edb9abed25 | [
"Intel"
] | 1 | 2022-03-26T14:42:50.000Z | 2022-03-26T14:42:50.000Z | #!/usr/bin/env python3
import argparse
import json
import os
import statistics
from collections import defaultdict
from tools.stats.s3_stat_parser import (
get_previous_reports_for_branch,
Report,
Version2Report,
)
from typing import cast, DefaultDict, Dict, List, Any
from urllib.request import urlopen
SLOW_TESTS_FILE = ".pytorch-slow-tests.json"
SLOW_TEST_CASE_THRESHOLD_SEC = 60.0
RELATIVE_DIFFERENCE_THRESHOLD = 0.1
IGNORED_JOBS = ["asan", "periodic"]
if __name__ == "__main__":
main()
| 36.856061 | 127 | 0.658787 |
13b0a600d04c4f624b452b2451a329662c1e6704 | 14,128 | py | Python | ml/rl/evaluation/weighted_sequential_doubly_robust_estimator.py | michaeltashman/Horizon | ee310b34adeb807bbae379a6e1703d0f725f26a9 | [
"BSD-3-Clause"
] | 1 | 2020-07-30T06:15:20.000Z | 2020-07-30T06:15:20.000Z | ml/rl/evaluation/weighted_sequential_doubly_robust_estimator.py | michaeltashman/Horizon | ee310b34adeb807bbae379a6e1703d0f725f26a9 | [
"BSD-3-Clause"
] | null | null | null | ml/rl/evaluation/weighted_sequential_doubly_robust_estimator.py | michaeltashman/Horizon | ee310b34adeb807bbae379a6e1703d0f725f26a9 | [
"BSD-3-Clause"
] | 1 | 2019-06-05T15:52:18.000Z | 2019-06-05T15:52:18.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import itertools
import logging
import numpy as np
import scipy as sp
import torch
from ml.rl.evaluation.cpe import CpeEstimate
from ml.rl.evaluation.evaluation_data_page import EvaluationDataPage
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
| 37.674667 | 106 | 0.613675 |
13b1070ba0b79397d1d29eb7190ddb42cc92bb2c | 678 | py | Python | LeetCode/2019-08-03-384-Shuffle-an-Array.py | HeRuivio/-Algorithm | 1fbe6256630758fda3af68f469471ee246730afc | [
"MIT"
] | 5 | 2018-10-30T05:07:32.000Z | 2019-06-18T08:11:38.000Z | LeetCode/2019-08-03-384-Shuffle-an-Array.py | HeRuivio/-Algorithm | 1fbe6256630758fda3af68f469471ee246730afc | [
"MIT"
] | 1 | 2020-05-09T09:05:16.000Z | 2020-05-09T09:05:16.000Z | LeetCode/2019-08-03-384-Shuffle-an-Array.py | HeRuivio/-Algorithm | 1fbe6256630758fda3af68f469471ee246730afc | [
"MIT"
] | 2 | 2020-05-09T09:02:22.000Z | 2020-12-09T13:23:00.000Z | # -*- coding: utf-8 -*-
# @Author:
# @Create Date: 2019-08-03 10:48:30
# @Last Modified by:
# @Last Modified time: 2019-08-03 10:53:15
import copy
import random
from typing import List
| 21.870968 | 69 | 0.581121 |
13b20f00d86e94eb5e2fb0079121846394df7d95 | 5,255 | py | Python | src/wspc/feature_selection.py | shakedna1/wspc_rep | f4492af8cec25a3f7b00687c08d30754a1c0c91f | [
"MIT"
] | null | null | null | src/wspc/feature_selection.py | shakedna1/wspc_rep | f4492af8cec25a3f7b00687c08d30754a1c0c91f | [
"MIT"
] | null | null | null | src/wspc/feature_selection.py | shakedna1/wspc_rep | f4492af8cec25a3f7b00687c08d30754a1c0c91f | [
"MIT"
] | null | null | null | import numpy as np
import sklearn
import pandas as pd
import scipy.spatial.distance as ssd
from scipy.cluster import hierarchy
from scipy.stats import chi2_contingency
from sklearn.base import BaseEstimator
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import SelectKBest, SelectorMixin
from sklearn.pipeline import Pipeline
def get_fs_pipeline(k, threshold, random_state=0):
"""
Creates feature selection pipeline
Parameters
----------
k - the k parameter for the SelectKBest features function
threshold - clustering threshold for the Hierarchial clustering
random_state - random state for the RandomForestClassifier. Deafult value: 0
Returns
----------
pipeline - feature selection pipeline
"""
pipeline = Pipeline(steps=[('vectorize', CountVectorizer(lowercase=False, binary=True)),
('k_best', SelectKBest(score_func=sklearn.feature_selection.chi2, k=k)),
('cluster', SelectHierarchicalClustering(threshold=threshold)),
('rf', RandomForestClassifier(random_state=random_state))])
return pipeline
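# Editorial sketch (assumed usage, not part of the original file): the returned
# pipeline is a regular scikit-learn estimator, so training and prediction follow the
# usual API. The parameter values below are placeholders:
#
#     pipeline = get_fs_pipeline(k=300, threshold=0.8)
#     pipeline.fit(train_texts, train_labels)       # train_texts: iterable of strings
#     predictions = pipeline.predict(test_texts)
#
# `SelectHierarchicalClustering` is the project-specific selector defined elsewhere in
# this module.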
| 33.259494 | 120 | 0.664891 |
13b2ef5da8cb4bdd6ae2ffffe9632e5405ed5cb0 | 5,985 | py | Python | Python3/PS_scraping_selenium.py | fsj-digital/pages | 8360f27e67974ed2b4f39eb64377f39c0189a224 | [
"MIT"
] | 5 | 2019-10-28T19:09:16.000Z | 2021-08-19T07:44:54.000Z | Python3/PS_scraping_selenium.py | fsj-digital/pages | 8360f27e67974ed2b4f39eb64377f39c0189a224 | [
"MIT"
] | null | null | null | Python3/PS_scraping_selenium.py | fsj-digital/pages | 8360f27e67974ed2b4f39eb64377f39c0189a224 | [
"MIT"
] | 6 | 2020-04-28T22:33:06.000Z | 2021-06-22T15:53:52.000Z | from bs4 import BeautifulSoup
import requests
import re
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.touch_actions import TouchActions
from selenium.common.exceptions import TimeoutException
URL = 'https://shopping.thinkwithgoogle.com'
EXAMPLES = ["Demonstrate unexpected use-case",
"Demonstrate google search",
"Demonstrate search on thinkwithgoogle",
"Demonstrate search on WebDriverWait",
"Demonstrate search on thinkwithgoogle search result",
"Download and extract additional data",
"Demonstrate maximizing screen",
"Demonstrate mouse actions for Chrome",
"Demonstrate navigation"]
if __name__ == '__main__':
while(True):
printSelection()
choice = input('Enter choice: ')
try:
choice = int(choice)
except ValueError:
print('Invalid input, stop program')
break
if(choice not in range(0,9)):
print('Invalid input, stop program')
break
run(int(choice), URL)
| 43.057554 | 143 | 0.637594 |
13b41f50da86c6a2be3204ada5e6385e678b7b05 | 5,531 | py | Python | AppTest/testTCPserver.py | STRATOLOGIC/SpacePyLibrary | 89fc3873c6d787ad4e391f6080d9dd3218ffc4a2 | [
"MIT"
] | 22 | 2015-01-22T13:40:22.000Z | 2022-02-19T02:03:12.000Z | AppTest/testTCPserver.py | STRATOLOGIC/SpacePyLibrary | 89fc3873c6d787ad4e391f6080d9dd3218ffc4a2 | [
"MIT"
] | 3 | 2018-09-28T13:14:40.000Z | 2022-02-08T14:19:13.000Z | AppTest/testTCPserver.py | STRATOLOGIC/SpacePyLibrary | 89fc3873c6d787ad4e391f6080d9dd3218ffc4a2 | [
"MIT"
] | 11 | 2016-06-01T11:53:56.000Z | 2022-02-08T14:19:34.000Z | #!/usr/bin/env python3
#******************************************************************************
# (C) 2018, Stefan Korner, Austria *
# *
# The Space Python Library is free software; you can redistribute it and/or *
# modify it under under the terms of the MIT License as published by the *
# Massachusetts Institute of Technology. *
# *
# The Space Python Library is distributed in the hope that it will be useful, *
# but WITHOUT ANY WARRANTY; without even the implied warranty of *
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the MIT License *
# for more details. *
#******************************************************************************
# Unit Tests *
#******************************************************************************
import sys
from UTIL.SYS import Error, LOG, LOG_INFO, LOG_WARNING, LOG_ERROR
import UTIL.SYS, UTIL.TASK, UTIL.TCP
#############
# constants #
#############
LINEBUFFERLEN = 256
###########
# classes #
###########
# =============================================================================
#############
# functions #
#############
# -----------------------------------------------------------------------------
def initConfiguration():
"""initialise the system configuration"""
UTIL.SYS.s_configuration.setDefaults([
["HOST", "127.0.0.1"],
["SERVER_PORT", "1234"]])
# -----------------------------------------------------------------------------
def createServer():
"""create the TCP server"""
server = TCPserver(portNr=int(UTIL.SYS.s_configuration.SERVER_PORT))
if not server.openConnectPort(UTIL.SYS.s_configuration.HOST):
sys.exit(-1)
  # activate cyclic idle function
idleFunction()
# -----------------------------------------------------------------------------
########
# main #
########
if __name__ == "__main__":
# initialise the system configuration
initConfiguration()
# initialise the console handler
consoleHandler = UTIL.TASK.ConsoleHandler()
# initialise the model
modelTask = UTIL.TASK.ProcessingTask(isParent=True)
# register the console handler
modelTask.registerConsoleHandler(consoleHandler)
# create the TCP server
LOG("Open the TCP server")
createServer()
# start the tasks
LOG("start modelTask...")
modelTask.start()
| 39.22695 | 83 | 0.513108 |
13b42a597f46ffd75065d8212ea8951934240d0a | 9,253 | py | Python | tests/clientlib_test.py | yoavcaspi/pre-commit | 77947f212e7b88a479dbe6feebc60a9f773e8c13 | [
"MIT"
] | null | null | null | tests/clientlib_test.py | yoavcaspi/pre-commit | 77947f212e7b88a479dbe6feebc60a9f773e8c13 | [
"MIT"
] | null | null | null | tests/clientlib_test.py | yoavcaspi/pre-commit | 77947f212e7b88a479dbe6feebc60a9f773e8c13 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import logging
import cfgv
import pytest
import pre_commit.constants as C
from pre_commit.clientlib import check_type_tag
from pre_commit.clientlib import CONFIG_HOOK_DICT
from pre_commit.clientlib import CONFIG_REPO_DICT
from pre_commit.clientlib import CONFIG_SCHEMA
from pre_commit.clientlib import DEFAULT_LANGUAGE_VERSION
from pre_commit.clientlib import MANIFEST_SCHEMA
from pre_commit.clientlib import MigrateShaToRev
from pre_commit.clientlib import validate_config_main
from pre_commit.clientlib import validate_manifest_main
from testing.fixtures import sample_local_config
def test_local_hooks_with_rev_fails():
config_obj = {'repos': [dict(sample_local_config(), rev='foo')]}
with pytest.raises(cfgv.ValidationError):
cfgv.validate(config_obj, CONFIG_SCHEMA)
def test_config_with_local_hooks_definition_passes():
config_obj = {'repos': [sample_local_config()]}
cfgv.validate(config_obj, CONFIG_SCHEMA)
def test_config_schema_does_not_contain_defaults():
"""Due to the way our merging works, if this schema has any defaults they
will clobber potentially useful values in the backing manifest. #227
"""
for item in CONFIG_HOOK_DICT.items:
assert not isinstance(item, cfgv.Optional)
def test_migrate_sha_to_rev_dont_specify_both():
with pytest.raises(cfgv.ValidationError) as excinfo:
MigrateShaToRev().check({'repo': 'a', 'sha': 'b', 'rev': 'c'})
msg, = excinfo.value.args
assert msg == 'Cannot specify both sha and rev'
def test_minimum_pre_commit_version_failing():
with pytest.raises(cfgv.ValidationError) as excinfo:
cfg = {'repos': [], 'minimum_pre_commit_version': '999'}
cfgv.validate(cfg, CONFIG_SCHEMA)
assert str(excinfo.value) == (
'\n'
'==> At Config()\n'
'==> At key: minimum_pre_commit_version\n'
'=====> pre-commit version 999 is required but version {} is '
'installed. Perhaps run `pip install --upgrade pre-commit`.'.format(
C.VERSION,
)
)
def test_minimum_pre_commit_version_passing():
cfg = {'repos': [], 'minimum_pre_commit_version': '0'}
cfgv.validate(cfg, CONFIG_SCHEMA)
| 29.189274 | 78 | 0.556576 |
13b520f65af9148fce1413d5d355fa797126e985 | 5,494 | py | Python | ikalog/ui/options.py | fetus-hina/IkaLog | bd476da541fcc296f792d4db76a6b9174c4777ad | [
"Apache-2.0"
] | 285 | 2015-08-15T14:38:38.000Z | 2022-02-18T15:00:06.000Z | ikalog/ui/options.py | fetus-hina/IkaLog | bd476da541fcc296f792d4db76a6b9174c4777ad | [
"Apache-2.0"
] | 323 | 2015-09-24T12:21:34.000Z | 2018-05-06T16:34:54.000Z | ikalog/ui/options.py | fetus-hina/IkaLog | bd476da541fcc296f792d4db76a6b9174c4777ad | [
"Apache-2.0"
] | 72 | 2015-08-22T00:18:54.000Z | 2022-02-18T14:44:20.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 Takeshi HASEGAWA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import gettext
import wx
import wx.lib.scrolledpanel
import ikalog.outputs
from ikalog.ui.events import *
from ikalog.ui.panel import *
from ikalog.ui import VideoCapture
from ikalog.utils import *
_ = Localization.gettext_translation('IkaUI', fallback=True).gettext
| 32.508876 | 80 | 0.630142 |
13b61f77d9db0538ba6a1a1c9673544d53143882 | 584 | py | Python | setup.py | CyberTKR/Simple-LINELIB | 8596afb6b201b13675a0ed6314b3151f6bbf208b | [
"BSD-3-Clause"
] | 4 | 2022-02-20T11:27:29.000Z | 2022-03-05T00:50:05.000Z | setup.py | CyberTKR/Simple-LINELIB | 8596afb6b201b13675a0ed6314b3151f6bbf208b | [
"BSD-3-Clause"
] | null | null | null | setup.py | CyberTKR/Simple-LINELIB | 8596afb6b201b13675a0ed6314b3151f6bbf208b | [
"BSD-3-Clause"
] | null | null | null | from setuptools import setup, find_packages
with open("README.md", 'r',encoding="utf-8") as f:
long_description = f.read()
setup(
name='LineBot',
version='0.1.0',
description='Simple-LINELIB',
long_description=long_description,
author='Tolg KR',
author_email='[email protected]',
url='https://github.com/CyberTKR/Simple-LINELIB',
packages=find_packages(include=['CyberTK', 'CyberTK.*']),
install_requires=[
'httpx==0.19.0',
'requests',
'thrift',
'CyberTKAPI'
],
extras_require={'httpx': ['http2']}
)
| 25.391304 | 61 | 0.628425 |
13b64cfd2fd1152628636d4313ba611c18b0ee8d | 4,552 | py | Python | lib/SeparateDriver/CgwshDeviceDriverSetParameterECDB.py | multi-service-fabric/element-manager | e550d1b5ec9419f1fb3eb6e058ce46b57c92ee2f | [
"Apache-2.0"
] | null | null | null | lib/SeparateDriver/CgwshDeviceDriverSetParameterECDB.py | multi-service-fabric/element-manager | e550d1b5ec9419f1fb3eb6e058ce46b57c92ee2f | [
"Apache-2.0"
] | null | null | null | lib/SeparateDriver/CgwshDeviceDriverSetParameterECDB.py | multi-service-fabric/element-manager | e550d1b5ec9419f1fb3eb6e058ce46b57c92ee2f | [
"Apache-2.0"
] | 1 | 2020-04-02T01:17:43.000Z | 2020-04-02T01:17:43.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright(c) 2019 Nippon Telegraph and Telephone Corporation
# Filename: CgwshDeviceDriverSetParameterECDB.py
'''
Parameter module for Cgwsh driver configuration
'''
import GlobalModule
from EmCommonLog import decorater_log
from DriverSetParameterECDB import DriverSetParameterECDB
| 32.985507 | 79 | 0.530097 |
13b73188b11fd452c2465eac91ddbc3efbb01c8c | 13,767 | py | Python | scripts/common_lib/build_lib.py | Bhaskers-Blu-Org1/wc-devops-utilities | d8131261cb3d67ce872b541c5e2d8ff22fcbf614 | [
"Apache-2.0"
] | 15 | 2018-06-26T19:48:08.000Z | 2021-01-18T13:29:16.000Z | scripts/common_lib/build_lib.py | Bhaskers-Blu-Org1/wc-devops-utilities | d8131261cb3d67ce872b541c5e2d8ff22fcbf614 | [
"Apache-2.0"
] | 16 | 2018-05-29T08:12:38.000Z | 2022-02-15T15:25:14.000Z | scripts/common_lib/build_lib.py | IBM/wc-devops-utilities | d8131261cb3d67ce872b541c5e2d8ff22fcbf614 | [
"Apache-2.0"
] | 21 | 2018-05-29T11:54:05.000Z | 2021-12-20T06:42:54.000Z | #!/usr/bin/env python3.6
import os
import subprocess
import json
import argparse
import zipfile
import shutil
import requests
import datetime
import re
import operator
import unicodedata
# global list of error messages to keep track of all error msgs
errorMessages = []
"""
Collection of Common Functions used by Build Scripts
A collection of common functions shared by each individual build script.
"""
def get(url, usr, pwd):
"""
HTTP/HTTPS GET requests using external Python module requests
@param url the url of the REST call
@param usr the functional username for the docker registry
@param pwd the password for the docker registry functional user
@return a JSON response
"""
headers = {
'Accept': 'application/vnd.docker.distribution.manifest.v1+json',
}
# TEMP: Remove the suppressed verification once the docker cert location
# is figured out and we specify it in REQUESTS_CA_BUNDLE
return requests.get(url, auth=(usr, pwd), headers=headers, verify=False)
def get_latest_tag(registry_path, usr, pwd):
"""
Retrieve the latest version of an image based on its tags: vX-YYYYMMDD-HHmm.
The latest, by definition, is defined to be the one with the highest version
number (vX) and the latest timestamp (YYYYMMDD-HHmm).
@param registry_path docker registry path
@param usr the functional username for the docker registry
@param pwd the password for the docker registry functional user
@return the latest image tag
"""
tag_list_url = registry_path + '/tags/list'
request = get(tag_list_url, usr, pwd)
tag_list = json.loads(request.text)
for tag in tag_list['tags']:
if '-' not in tag:
continue
str_version, str_dash, str_timestamp = tag.partition('-')
tag_format="%Y%m%d-%H%M"
try:
dt_timestamp = datetime.datetime.strptime(str_timestamp, tag_format)
except ValueError:
continue
try:
latest_version
latest_timestamp
latest_tag
except NameError:
latest_version = str_version
latest_timestamp = dt_timestamp
latest_tag = tag
else:
if latest_version > str_version:
continue
elif latest_version < str_version:
latest_version = str_version
latest_timestamp = dt_timestamp
latest_tag = tag
else:
if latest_timestamp < dt_timestamp:
latest_timestamp = dt_timestamp
latest_tag = tag
return latest_tag
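# Editorial sketch (illustrative, not part of the original script): tags are expected
# to look like "vX-YYYYMMDD-HHmm", e.g. "v2-20180115-0930". Ordering is by version
# first, then timestamp, so for example:
#
#     ["v1-20180301-1200", "v2-20180101-0800", "v2-20180115-0930"]
#     # -> get_latest_tag(...) would report "v2-20180115-0930"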
def unzip(zip_file, to_dir):
"""
Generic unzip function for extracting zip files
@param zip_file the zip file to be extracted
@param to_dir the destination directory to extract the zip file to
"""
with zipfile.ZipFile(zip_file, "r") as zip_ref:
zip_ref.extractall(to_dir)
zip_ref.close()
def create_dockerfile(dockerfile_parent_dir, docker_url, image_namespace, image_name, image_tag_latest):
"""
Creates a dockerfile using the correct docker registry URL associated
with the datacenter this script is being run on
:param str dockerfile_parent_dir: path to the parent directory for the Dockerfile
:param str docker_url: the docker registry VIP accessible from the mesos slaves
:param str image_namespace: the name of the image
:param str image_name: the name of the image
:param str image_tag_latest: the latest version tag of the base image
:returns: None
"""
# Form the path for the Dockerfile based on the parent of the caller script
dockerfile_path = os.path.join(dockerfile_parent_dir, "Dockerfile")
# Create the Dockerfile
dockerfile = open(dockerfile_path, "w+")
# Format the FROM command
dockerfile_from_cmd = "FROM " + docker_url + image_namespace + "/" + image_name + ":" + image_tag_latest
# Write the FROM command string to the Dockerfile
dockerfile.write(dockerfile_from_cmd)
# Close the open file instance
dockerfile.close()
def set_docker_client_timeout():
"""
Sets the DOCKER_CLIENT_TIMEOUT environment variable to 300
"""
os.environ['DOCKER_CLIENT_TIMEOUT'] = '300'
print("The timeout set for docker client: " + os.environ['DOCKER_CLIENT_TIMEOUT'] + " seconds")
# ======================= verify bundle Structure ===============================================
def openJSONfile(jsonFile):
"""
Function to open a JSON file
@param jsonFile path to the JSON file
@return the loaded JSON file
"""
try:
with open(jsonFile) as json_data_file:
data = json.load(json_data_file)
except:
addToErrorMessages("The specified JSON file is not valid: " + jsonFile)
raise
return data
def directoryToJSON(directory):
"""
Function to convert objects in a given directory into JSON form.
The parent object is always a dict, it may contain children if type=directory.
A directory is composed of a list and may contain files and/or directories.
@param directory directory to convert
@return JSON representation of a directory
"""
d = {'name': os.path.basename(directory)} # the parent object is dict
if os.path.isdir(directory):
d['type'] = "directory"
# directory may have children
# the children in a directory is a list composed of more files/directories
d['children'] = [directoryToJSON(os.path.join(directory,x)) for x in os.listdir(directory)]
else:
d['type'] = "file"
return d
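# Editorial sketch (assumed example, not from the original source): for a directory
#
#     bundle/
#         config.json
#         scripts/
#             deploy.sh
#
# directoryToJSON("bundle") returns roughly:
#
#     {"name": "bundle", "type": "directory", "children": [
#         {"name": "config.json", "type": "file"},
#         {"name": "scripts", "type": "directory", "children": [
#             {"name": "deploy.sh", "type": "file"}]}]}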
def verifyBundleStructure(expected, actual, currentPath):
"""
Function to verify if an uploaded bundle follows IBM defined structure
@param expected the JSON representation of the IBM defined structure
@param actual the JSON representation of the actual structure of the uploaded bundle
@param currentPath the path currently being checked (used to build paths recursively for error msg)
@return True if structure of the uploaded bundle follows IBM defined structure. False otherwise.
"""
isMatched = True
if type(expected) is dict:
if matches(expected,actual): # a matching file or directory was found
if expected['type'] == 'directory':
currentPath = currentPath + actual['name'] + "/"
if expected['children'] == "_any":
isMatched = isMatched & True # if the contents of the directory can be anything then do no further checking
else:
isMatched = isMatched & verifyBundleStructure(expected['children'], actual['children'], currentPath) # do further checking
else: # a matching file or directory was not found
if expected['fail-if-not-found'] == "yes":
logBundleStructureErrorMessage(expected, currentPath)
return False
if type(expected) is list:
for k in range(0,len(expected)):
isMatched = isMatched & verifyActualContainsExpectedElement(actual, expected[k], currentPath, isMatched)
return isMatched
def logBundleStructureErrorMessage(expected, currentPath):
"""
    Function to add error messages to the global array.
@param expected the expected element
@param currentPath the current path we are on that has the missing file or directory
"""
addToErrorMessages("A "+ expected['type'] +" is missing from the path: \"" + currentPath + "\"")
addToErrorMessages(expected['error-message-if-fails'])
return
def matches(expectedElement, actualElement):
"""
Function to check if files/directories match. They must have the same name and must both be the same type.
@param expectedElement the expected element. May be defined by regular expression
@param actualElement the actual element
"""
ret = False
if re.fullmatch(expectedElement['name'], actualElement['name']) is not None and expectedElement['type'] == actualElement['type']:
ret = True
return ret
def verifyActualContainsExpectedElement(actual, expectedElement, currentPath, isMatched):
"""
Function to verify if an actual list of objects contains an expected element. Helper method to verifyBundleStructure.
@param actual list of the actual files and directories in the bundle
@param expectedElement the expected element to find in the bundle
@param currentPath the path currently being checked (used to build paths recursively for error msg)
@param isMatched (only used for recursive calls)
@return True if the list of actual objects contain the expected element
"""
# if actual is a dict then verify it and its children
if type(actual) is dict:
isMatched = isMatched & verifyBundleStructure(expectedElement,actual, currentPath)
# if actual is a list then find out if they match anywhere, if so get the matched position
elif type(actual) is list:
matchedPosition = -1
for i in range(0, len(actual)):
if matches(expectedElement,actual[i]):
matchedPosition = i
break
if matchedPosition != -1: # if they match then verify their children too
isMatched = isMatched & verifyBundleStructure(expectedElement, actual[matchedPosition] , currentPath)
else : # if they don't match then log the error msg and return false
if expectedElement['fail-if-not-found'] == "yes": # log error msg and return false if needed
isMatched = False
logBundleStructureErrorMessage(expectedElement, currentPath)
return isMatched
def addToErrorMessages(errorMessage):
"""
Function to add error messages to the global list of errorMessages
@param errorMessage the error message to add
"""
print(errorMessage)
    global errorMessages
errorMessages.extend([errorMessage])
return
def unzipRecursively(zipFileName, directoryToUnzipTo):
"""
Function to unzip a ZIP file recursively
@param zipFileName the zip file to be extracted
@param directoryToUnzipTo the destination directory to extract the zip file to
"""
# update
if zipFileName.endswith(".zip"): #check if it's a .zip
unzip(zipFileName,directoryToUnzipTo)
os.remove(zipFileName)
for x in os.listdir(directoryToUnzipTo):
subdirectory = os.path.join(directoryToUnzipTo, os.path.splitext(x)[0])
subfile = os.path.join(directoryToUnzipTo, x )
unzipRecursively(subfile, subdirectory)
return
def zipFileIsGood(filePath):
"""
Function to test if a ZIP file is good or bad
@param filePath the zip file to be tested
@return True if the ZIP file is good. False otherwise.
"""
ret = True
try:
the_zip_file = zipfile.ZipFile(filePath)
badFile = the_zip_file.testzip()
if badFile is not None:
ret = False
else:
ret = True
except:
ret = False
return ret
def verifyZipFile(zipDirectory, nameOfBundle):
"""
Function to verify if an uploaded bundle is:
1) a valid zip file
2) follows IBM defined structure
@param zipDirectory where the bundle ZIP is located
@param nameOfBundle name of the bundle ZIP file
"""
print ('Validating bundle structure...')
bundleIsGood = True
bundleZip = os.path.join(zipDirectory, nameOfBundle)
if zipFileIsGood(bundleZip):
try:
# copy bundle into new working directory -----------------------------------------------------------
directoryToUnzipTo = os.path.join(zipDirectory, "temp")
if not os.path.exists(directoryToUnzipTo):
os.makedirs(directoryToUnzipTo)
shutil.copy(bundleZip, os.path.join(directoryToUnzipTo, nameOfBundle))
# unzip the bundle ----------------------------------------------------------------------------------
unzipRecursively(os.path.join(directoryToUnzipTo, nameOfBundle), os.path.join(directoryToUnzipTo, os.path.splitext(nameOfBundle)[0]))
# verify structure of bundle ------------------------------------------------------------------------
# check package stucture
expectedPackageStructure = openJSONfile(os.path.join(zipDirectory, "bundle-definition.json"))
actualBundleStructure = directoryToJSON(directoryToUnzipTo) # convert the unzipped directory to JSON file
bundleIsGood = verifyBundleStructure(expectedPackageStructure, actualBundleStructure, "")
if not bundleIsGood:
addToErrorMessages("The uploaded bundle does not meet predefined structure. Could not proceed with deployment.")
# clean up unzipped stuff and package structure Json -------------------------------------------------
shutil.rmtree(directoryToUnzipTo)
except:
addToErrorMessages("Exception occurred while verifying bundle structure. Could not proceed with deployment.")
bundleIsGood = False
else:
bundleIsGood = False
addToErrorMessages("The uploaded bundle could not be unzipped. Could not proceed with deployment.")
    # output report value, join all the messages together
print ("report=[" + ". ".join(str(x) for x in errorMessages) + "]")
return bundleIsGood
| 36.81016 | 145 | 0.648217 |
13b7df8dc09a874c18b0de4987e789a5a8c1dfcd | 10,035 | py | Python | src/static_grasp_kt.py | ivalab/GraspKpNet | d4b6186d74ac82a745d778892742d52a204bd1cf | [
"MIT"
] | 16 | 2021-05-04T23:08:47.000Z | 2022-01-19T08:33:14.000Z | src/static_grasp_kt.py | ivalab/GraspKpNet | d4b6186d74ac82a745d778892742d52a204bd1cf | [
"MIT"
] | 2 | 2021-06-22T22:54:44.000Z | 2021-10-04T19:23:35.000Z | src/static_grasp_kt.py | ivalab/GraspKpNet | d4b6186d74ac82a745d778892742d52a204bd1cf | [
"MIT"
] | 2 | 2021-07-10T12:51:29.000Z | 2022-02-17T06:45:54.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import json
import cv2
import cv2.aruco as aruco
import numpy as np
import sys
import rospy
from std_msgs.msg import Bool
from std_msgs.msg import Float64MultiArray
from sensor_msgs.msg import Image, CameraInfo
from cv_bridge import CvBridge, CvBridgeError
import message_filters
import torch
from external.nms import soft_nms
from opts import opts
from logger import Logger
from utils.utils import AverageMeter
from datasets.dataset_factory import dataset_factory
from detectors.detector_factory import detector_factory
# transformation from the robot base to aruco tag
M_BL = np.array([[1., 0., 0., 0.30000],
[0., 1., 0., 0.32000],
[0., 0., 1., -0.0450],
[0., 0., 0., 1.00000]])
# default transformation from the camera to aruco tag
default_M_CL = np.array([[-0.07134498, -0.99639369, 0.0459293, -0.13825178],
[-0.8045912, 0.03027403, -0.59305689, 0.08434352],
[ 0.58952768, -0.07926594, -0.8038495, 0.66103522],
[ 0., 0., 0., 1. ]]
)
# camera intrinsic matrix of Realsense D435
cameraMatrix = np.array([[607.47165, 0.0, 325.90064],
[0.0, 606.30420, 240.91934],
[0.0, 0.0, 1.0]])
# distortion of Realsense D435
distCoeffs = np.array([0.08847, -0.04283, 0.00134, -0.00102, 0.0])
# initialize GKNet Detector
opt = opts().parse()
Dataset = dataset_factory[opt.dataset]
opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
print(opt)
Detector = detector_factory[opt.task]
detector = Detector(opt)
# Publisher of perception result
pub_res = rospy.Publisher('/result', Float64MultiArray, queue_size=10)
def project(pixel, depth_image, M_CL, M_BL, cameraMatrix):
'''
project 2d pixel on the image to 3d by depth info
:param pixel: x, y
:param M_CL: trans from camera to aruco tag
:param cameraMatrix: camera intrinsic matrix
:param depth_image: depth image
:param depth_scale: depth scale that trans raw data to mm
:return:
q_B: 3d coordinate of pixel with respect to base frame
'''
depth = depth_image[pixel[1], pixel[0]]
# if the depth of the detected pixel is 0, check the depth of its neighbors
# by counter-clock wise
nei_range = 1
while depth == 0:
for delta_x in range(-nei_range, nei_range + 1):
for delta_y in range(-nei_range, nei_range + 1):
nei = [pixel[0] + delta_x, pixel[1] + delta_y]
depth = depth_image[nei[1], nei[0]]
if depth != 0:
break
if depth != 0:
break
nei_range += 1
pxl = np.linalg.inv(cameraMatrix).dot(
np.array([pixel[0] * depth, pixel[1] * depth, depth]))
q_C = np.array([pxl[0], pxl[1], pxl[2], 1])
q_L = np.linalg.inv(M_CL).dot(q_C)
q_B = M_BL.dot(q_L)
return q_B
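# Editorial note (not part of the original file): the back-projection above is the
# standard pinhole-camera model. With intrinsics K, a pixel (u, v) at depth d maps to
# a camera-frame point q_C = d * inv(K) @ [u, v, 1], which is then chained through the
# tag and base transforms:
#
#     q_L = inv(M_CL) @ q_C     # camera frame    -> aruco-tag frame
#     q_B = M_BL @ q_L          # aruco-tag frame -> robot base frame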
def kinect_rgbd_callback(rgb_data, depth_data):
"""
Save raw RGB and depth input from Kinect V1
:param rgb_data: RGB image
:param depth_data: raw depth image
:return: None
"""
try:
cv_rgb = cv_bridge.imgmsg_to_cv2(rgb_data, "bgr8")
cv_depth = cv_bridge.imgmsg_to_cv2(depth_data, "32FC1")
cv_rgb_arr = np.array(cv_rgb, dtype=np.uint8)
cv_depth_arr = np.array(cv_depth, dtype=np.float32)
# cv_depth_arr = np.nan_to_num(cv_depth_arr)
cv2.imshow("Depth", cv_depth)
cv2.imshow("RGB", cv_rgb)
img = cv_rgb_arr.copy()
depth_raw = cv_depth_arr.copy()
gray = img.astype(np.uint8)
depth = (depth_raw * 1000).astype(np.uint8)
# get the current transformation from the camera to aruco tag
M_CL, corners = get_M_CL_info(gray, img, False)
# remove aruco tag from input image to avoid mis-detection
if corners is not None:
img_wo_at = aruco_tag_remove(img, corners)
# replace blue channel with the depth channel
inp_image = pre_process(img_wo_at, depth)
# pass the image into the network
ret = detector.run(inp_image[:, :, :])
ret = ret["results"]
loc_ori = KpsToGrasppose(ret, img, depth_raw, M_CL, M_BL, cameraMatrix)
pub_res.publish(loc_ori)
except CvBridgeError as e:
print(e)
if __name__ == '__main__':
# initialize ros node
rospy.init_node("Static_grasping")
# Bridge to convert ROS Image type to OpenCV Image type
cv_bridge = CvBridge()
cv2.WITH_QT = False
# Get camera calibration parameters
cam_param = rospy.wait_for_message('/camera/rgb/camera_info', CameraInfo, timeout=None)
# Subscribe to rgb and depth channel
image_sub = message_filters.Subscriber("/camera/rgb/image_rect_color", Image)
depth_sub = message_filters.Subscriber("/camera/depth_registered/image", Image)
ts = message_filters.ApproximateTimeSynchronizer([image_sub, depth_sub], 1, 0.1)
ts.registerCallback(kinect_rgbd_callback)
rospy.spin() | 34.249147 | 107 | 0.623019 |
13b8dc2efbc6e5399774e3bdb1583b1ec3d22dca | 13,278 | py | Python | source/utils/augmentations.py | dovietchinh/multi-task-classification | 23a70300a7a800bc982f87902b6aa1faaf91b489 | [
"RSA-MD"
] | null | null | null | source/utils/augmentations.py | dovietchinh/multi-task-classification | 23a70300a7a800bc982f87902b6aa1faaf91b489 | [
"RSA-MD"
] | null | null | null | source/utils/augmentations.py | dovietchinh/multi-task-classification | 23a70300a7a800bc982f87902b6aa1faaf91b489 | [
"RSA-MD"
] | null | null | null | import numpy as np
import cv2
import random
def preprocess(img,img_size,padding=True):
"""[summary]
Args:
img (np.ndarray): images
img_size (int,list,tuple): target size. eg: 224 , (224,224) or [224,224]
padding (bool): padding img before resize. Prevent from image distortion. Defaults to True.
Returns:
images (np.ndarray): images in target size
"""
if padding:
height,width,_ = img.shape
delta = height - width
if delta > 0:
img = np.pad(img,[[0,0],[delta//2,delta//2],[0,0]], mode='constant',constant_values =255)
else:
img = np.pad(img,[[-delta//2,-delta//2],[0,0],[0,0]], mode='constant',constant_values =255)
if isinstance(img_size,int):
img_size = (img_size,img_size)
return cv2.resize(img,img_size)
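# Editorial sketch (assumed usage, not part of the original file): pad-then-resize
# preserves the aspect ratio before scaling to the network input size. The file name
# below is a placeholder:
#
#     img = cv2.imread("sample.jpg")      # e.g. 480x640x3
#     inp = preprocess(img, 224)          # padded to 640x640, then resized to 224x224x3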
if __name__ == '__main__':
augmentation_test() | 42.694534 | 131 | 0.581865 |
13b9386ce9cd9ff6be8dca6211a1ab2dc6917f81 | 7,340 | py | Python | BaseTools/Source/Python/GenFds/CapsuleData.py | James992927108/uEFI_Edk2_Practice | 2cac7618dfee10bfa5104a2e167c85425fde0100 | [
"BSD-2-Clause"
] | 6 | 2020-01-10T05:16:15.000Z | 2022-01-06T17:41:58.000Z | BaseTools/Source/Python/GenFds/CapsuleData.py | James992927108/uEFI_Edk2_Practice | 2cac7618dfee10bfa5104a2e167c85425fde0100 | [
"BSD-2-Clause"
] | null | null | null | BaseTools/Source/Python/GenFds/CapsuleData.py | James992927108/uEFI_Edk2_Practice | 2cac7618dfee10bfa5104a2e167c85425fde0100 | [
"BSD-2-Clause"
] | 3 | 2018-04-21T07:59:33.000Z | 2018-04-23T02:06:01.000Z | ## @file
# generate capsule
#
# Copyright (c) 2007-2017, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import Ffs
from GenFdsGlobalVariable import GenFdsGlobalVariable
import StringIO
from struct import pack
import os
from Common.Misc import SaveFileOnChange
import uuid
## base class for capsule data
#
#
## FFS class for capsule data
#
#
## FV class for capsule data
#
#
## FD class for capsule data
#
#
## AnyFile class for capsule data
#
#
## Afile class for capsule data
#
#
| 29.837398 | 143 | 0.568665 |
13b9d127851e263bb83cf946e93cc967e190ce5a | 453 | py | Python | CalculatingPi/pi_linear_plot.py | davidmallasen/Hello_MPI | 8a5b5694ffc1515d2bb2dee45355f92f1b68fbed | [
"MIT"
] | null | null | null | CalculatingPi/pi_linear_plot.py | davidmallasen/Hello_MPI | 8a5b5694ffc1515d2bb2dee45355f92f1b68fbed | [
"MIT"
] | null | null | null | CalculatingPi/pi_linear_plot.py | davidmallasen/Hello_MPI | 8a5b5694ffc1515d2bb2dee45355f92f1b68fbed | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
# Read data
size = []
time = []
with open("pi_linear.txt") as file:
for line in file.readlines():
x, y = line.split(',')
size.append(int(x.strip()))
time.append(float(y.strip()))
# Plot data
fig, ax = plt.subplots()
ax.plot(size, time)
ax.set(xlabel='Num. processes', ylabel='Time (s)',
title='Pi linear')
#ax.grid()
fig.savefig("pi_linear.png")
plt.show()
| 19.695652 | 50 | 0.611479 |
13bb8f895f0b886cb437c77e5bc0bd429007f636 | 2,193 | py | Python | esque_wire/protocol/structs/api/elect_preferred_leaders_response.py | real-digital/esque-wire | eb02c49f38b89ad5e5d25aad15fb4ad795e52807 | [
"MIT"
] | null | null | null | esque_wire/protocol/structs/api/elect_preferred_leaders_response.py | real-digital/esque-wire | eb02c49f38b89ad5e5d25aad15fb4ad795e52807 | [
"MIT"
] | 7 | 2019-11-26T08:19:49.000Z | 2021-03-15T14:27:47.000Z | esque_wire/protocol/structs/api/elect_preferred_leaders_response.py | real-digital/esque-wire | eb02c49f38b89ad5e5d25aad15fb4ad795e52807 | [
"MIT"
] | null | null | null | from typing import ClassVar, List, Optional
from ...constants import ApiKey, ErrorCode
from ..base import ResponseData
| 36.55 | 112 | 0.690378 |
13bc25bc6434cc017d92bbc47c055999ff8c038c | 3,181 | py | Python | tests/stack_test.py | arthurlogilab/py_zipkin | 8e733506c399967ea74c56b99a9a421e1bb1736a | [
"Apache-2.0"
] | 225 | 2016-09-16T17:57:51.000Z | 2022-02-12T22:15:32.000Z | tests/stack_test.py | arthurlogilab/py_zipkin | 8e733506c399967ea74c56b99a9a421e1bb1736a | [
"Apache-2.0"
] | 156 | 2016-09-17T03:50:04.000Z | 2021-03-17T23:19:40.000Z | tests/stack_test.py | arthurlogilab/py_zipkin | 8e733506c399967ea74c56b99a9a421e1bb1736a | [
"Apache-2.0"
] | 53 | 2016-09-20T18:34:08.000Z | 2021-08-31T06:14:03.000Z | import mock
import pytest
import py_zipkin.storage
| 34.576087 | 83 | 0.727759 |
13bd80e7104701ce224a3004f95e9aa8f8c681e9 | 2,293 | py | Python | myapp/processes/plotter.py | cp4cds/cp4cds-wps-template | ed170fcee72146dc07c64f76ec71cc289672fd32 | [
"Apache-2.0"
] | null | null | null | myapp/processes/plotter.py | cp4cds/cp4cds-wps-template | ed170fcee72146dc07c64f76ec71cc289672fd32 | [
"Apache-2.0"
] | null | null | null | myapp/processes/plotter.py | cp4cds/cp4cds-wps-template | ed170fcee72146dc07c64f76ec71cc289672fd32 | [
"Apache-2.0"
] | null | null | null |
from pywps import Process, LiteralInput, ComplexInput, ComplexOutput
from pywps import Format
import logging
LOGGER = logging.getLogger('PYWPS')
import matplotlib
# no X11 server ... must be run first
# https://github.com/matplotlib/matplotlib/issues/3466/
matplotlib.use('Agg')
import matplotlib.pylab as plt
import cartopy.crs as ccrs
from netCDF4 import Dataset
AIR_DS = 'https://www.esrl.noaa.gov/psd/thredds/fileServer/Datasets/ncep.reanalysis.derived/surface/air.mon.ltm.nc'
| 32.295775 | 115 | 0.613171 |
13be33895810fafc0b133ddfa170c7d200a7bd44 | 56 | py | Python | json_schema_checker/composed/__init__.py | zorgulle/json_schema_checker | 20cac68f899528619e5059f0e1fbee0a0f7219d6 | [
"MIT"
] | null | null | null | json_schema_checker/composed/__init__.py | zorgulle/json_schema_checker | 20cac68f899528619e5059f0e1fbee0a0f7219d6 | [
"MIT"
] | null | null | null | json_schema_checker/composed/__init__.py | zorgulle/json_schema_checker | 20cac68f899528619e5059f0e1fbee0a0f7219d6 | [
"MIT"
] | null | null | null | from .composed import List
from .composed import IntList | 28 | 29 | 0.839286 |
13beebf4acd9b21bb28b852b68ff91457137cd72 | 9,767 | py | Python | backend/social_quiz.py | jmigual/socialQuiz | 3d9d0980961619b555732899121d8ce6366fa96f | [
"MIT"
] | null | null | null | backend/social_quiz.py | jmigual/socialQuiz | 3d9d0980961619b555732899121d8ce6366fa96f | [
"MIT"
] | null | null | null | backend/social_quiz.py | jmigual/socialQuiz | 3d9d0980961619b555732899121d8ce6366fa96f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import json
import os.path
import random
import re
from flask import Flask, send_from_directory
from flask import request, abort
from flaskrun.flaskrun import flask_run
import datab.social_database as db
app = Flask(__name__)
# Regular expression to only accept certain files
fileChecker = re.compile(r"(.*\.js|.*\.html|.*\.png|.*\.css|.*\.map)$")
numberOfAnswers = 4
random.seed(7)
if __name__ == '__main__':
flask_run(app)
| 33.221088 | 115 | 0.61288 |
13bef8558df71652db939d620d20eb4457b48c53 | 10,282 | py | Python | astacus/node/snapshotter.py | aiven/astacus | 2d64e1f33e01d50a41127f41d9da3d1ab0ce0387 | [
"Apache-2.0"
] | 19 | 2020-06-22T12:17:59.000Z | 2022-02-18T00:12:17.000Z | astacus/node/snapshotter.py | aiven/astacus | 2d64e1f33e01d50a41127f41d9da3d1ab0ce0387 | [
"Apache-2.0"
] | 7 | 2020-06-24T05:16:20.000Z | 2022-02-28T07:35:31.000Z | astacus/node/snapshotter.py | aiven/astacus | 2d64e1f33e01d50a41127f41d9da3d1ab0ce0387 | [
"Apache-2.0"
] | 2 | 2020-09-05T21:23:08.000Z | 2022-02-17T15:02:37.000Z | """
Copyright (c) 2020 Aiven Ltd
See LICENSE for details
"""
from astacus.common import magic, utils
from astacus.common.ipc import SnapshotFile, SnapshotHash, SnapshotState
from astacus.common.progress import increase_worth_reporting, Progress
from pathlib import Path
from typing import Optional
import base64
import hashlib
import logging
import os
import threading
logger = logging.getLogger(__name__)
_hash = hashlib.blake2s
| 41.128 | 122 | 0.642774 |
13c016b99333655007d9a8cc82e9391a0d3526d8 | 6,671 | py | Python | colcon_gradle/task/gradle/build.py | richiware/colcon-gradle | 00b121def8c15abd1dca310d0ea4e1f34f98f4d1 | [
"Apache-2.0"
] | null | null | null | colcon_gradle/task/gradle/build.py | richiware/colcon-gradle | 00b121def8c15abd1dca310d0ea4e1f34f98f4d1 | [
"Apache-2.0"
] | null | null | null | colcon_gradle/task/gradle/build.py | richiware/colcon-gradle | 00b121def8c15abd1dca310d0ea4e1f34f98f4d1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Esteve Fernandez
# Licensed under the Apache License, Version 2.0
from distutils import dir_util
import glob
import os
from pathlib import Path
import shutil
from colcon_core.environment import create_environment_scripts
from colcon_core.logging import colcon_logger
from colcon_core.plugin_system import satisfies_version
from colcon_core.shell import create_environment_hook
from colcon_core.shell import get_command_environment
from colcon_core.task import run
from colcon_core.task import TaskExtensionPoint
from colcon_gradle.task.gradle import get_wrapper_executable
from colcon_gradle.task.gradle import GRADLE_EXECUTABLE
from colcon_gradle.task.gradle import has_wrapper_executable
logger = colcon_logger.getChild(__name__)
| 37.268156 | 95 | 0.644431 |
13c18896742aca9b72a3db6ff3b991575fad3170 | 5,092 | py | Python | model_compression_toolkit/keras/quantizer/gradient_ptq/utils.py | eladc-git/model_optimization | 46d1c893ca23e61d8ef7597184ad2ba6e2ae6e7a | [
"Apache-2.0"
] | null | null | null | model_compression_toolkit/keras/quantizer/gradient_ptq/utils.py | eladc-git/model_optimization | 46d1c893ca23e61d8ef7597184ad2ba6e2ae6e7a | [
"Apache-2.0"
] | null | null | null | model_compression_toolkit/keras/quantizer/gradient_ptq/utils.py | eladc-git/model_optimization | 46d1c893ca23e61d8ef7597184ad2ba6e2ae6e7a | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from model_compression_toolkit.common.constants import MIN_THRESHOLD, THRESHOLD
def ste_ceil(x: tf.Tensor) -> tf.Tensor:
"""
Return the ceil values of a tensor.
"""
error = tf.stop_gradient(tf.math.ceil(x) - x)
return error + x
def ste_round(x: tf.Tensor) -> tf.Tensor:
"""
Return the rounded values of a tensor.
"""
error = tf.stop_gradient(tf.math.round(x) - x)
return error + x
def log2(x: tf.Tensor) -> tf.Tensor:
"""
Compute log2 of a tensor.
"""
return tf.math.log(x) / tf.math.log(2.0)
def power_of_two_max(max_tensor: tf.Tensor) -> tf.Tensor:
"""
Compute the power of two threshold for a tensor.
"""
return tf.math.pow(2.0, ste_ceil(log2(tf.maximum(max_tensor, MIN_THRESHOLD))))
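# For instance (illustrative values): a max_tensor of 0.3 yields
# 2 ** ceil(log2(0.3)) = 2 ** -1 = 0.5 as the power-of-two threshold.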
def calculate_delta(max_tensor: tf.Tensor,
num_bits: int,
signed: bool) -> tf.Tensor:
"""
Compute the step size for the quantization.
"""
return max_tensor / (2 ** (num_bits - int(signed)))
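# For instance (illustrative values): max_tensor = 1.0, num_bits = 8 and
# signed = True give a step size of 1.0 / 2 ** 7 = 0.0078125.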
def adjustable_steps(x: tf.Variable, t: float) -> tf.Tensor:
"""
A function to gradually quantize a float variable to an integer of values [-1, 0 ,1]
Args:
x: input float variable
t: temperature to control quantization
Returns:
semi-quantized variable
"""
return tf.sigmoid(tf.add(x, 1) / t) + tf.sigmoid(tf.add(x, -1) / t) - 1
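# For instance, as t approaches 0 the two sigmoids sharpen into unit steps, so
# x = -2.0, 0.0 and 2.0 map to roughly -1, 0 and 1; a larger t keeps the
# transition smooth (and differentiable) during training.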
def ste_clip(x: [tf.Tensor, tf.Variable], max_val=1, min_val=None) -> tf.Tensor:
"""
clip a variable between fixed values such that min_val<=output<=max_val
Args:
x: input variable
max_val: maximum value for clipping
min_val: minimum value for clipping (defaults to -max_val)
Returns:
clipped variable
"""
min_val = -max_val if min_val is None else min_val
return tf.stop_gradient(tf.math.minimum(tf.math.maximum(x, min_val), max_val) - x) + x
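# The stop_gradient term makes this a straight-through estimator: the forward
# value is clipped (e.g. ste_clip(tf.constant(1.7)) evaluates to 1.0 with the
# default max_val=1), while gradients flow through as if no clipping occurred.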
def symmetric_quantizer(input_tensor: tf.Tensor,
max_tensor: tf.Tensor,
num_bits: int,
signed: bool,
power_of_two: bool) -> tf.Tensor:
"""
Quantize a tensor symmetrically.
Args:
input_tensor: Tensor to quantize.
max_tensor: Tensor with max values to compute the threshold.
num_bits: Num of bits to use.
signed: Signedness of the quantization range.
power_of_two: Whether the threshold should be constrained or not.
Returns:
A quantized tensor.
"""
if power_of_two:
max_tensor = power_of_two_max(max_tensor)
delta = calculate_delta(max_tensor, num_bits, signed)
tensor_q = ste_round(input_tensor / delta)
min_int = -int(signed) * (2 ** (num_bits - int(signed)))
max_int = (2 ** (num_bits - int(signed))) - 1
return delta * tf.math.minimum(tf.math.maximum(tensor_q, min_int), max_int)
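# Usage sketch (illustrative, with an assumed threshold taken from the tensor's
# absolute maximum):
#   q = symmetric_quantizer(x, tf.reduce_max(tf.abs(x)), num_bits=8,
#                           signed=True, power_of_two=True)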
def symmetric_constrained_quantizer(input_tensor: tf.Tensor,
auxvar_tensor: tf.Variable,
max_tensor: tf.Tensor,
num_bits: int,
signed: bool,
power_of_two: bool,
max_lsbs_change: int = 1) -> tf.Tensor:
"""
Quantize a tensor symmetrically with maximum LSBs shift.
Args:
input_tensor: Tensor to quantize. values of this tensor are not changed during gptq.
auxvar_tensor: Tensor that manifests the bit shift the weight due to gptq
max_tensor: Tensor with max values to compute the threshold.
num_bits: Num of bits to use.
signed: Signedness of the quantization range.
power_of_two: Whether the threshold should be constrained or not.
max_lsbs_change: maximum number of LSBs that the auxvar is allowed to change
Returns:
A quantized tensor.
"""
if power_of_two:
max_tensor = power_of_two_max(max_tensor)
delta = calculate_delta(max_tensor, num_bits, signed)
tensor_q = ste_round(tf.stop_gradient(tf.round(input_tensor / delta)) + ste_clip(auxvar_tensor, max_val=max_lsbs_change))
min_int = -int(signed) * (2 ** (num_bits - int(signed)))
max_int = (2 ** (num_bits - int(signed))) - 1
return delta * ste_clip(tensor_q, max_val=max_int, min_val=min_int)
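# For instance (illustrative): with max_lsbs_change=1 each weight can move at
# most one quantization step away from its plainly rounded value, because the
# auxiliary variable is clipped to [-1, 1] before being added.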
| 34.876712 | 125 | 0.626866 |
13c2b5d7ceaee0819464ed2dba5f6801b590f3e0 | 9,421 | py | Python | pygments/lexers/tnt.py | btashton/pygments | ceaad0372055ed0064121020fea032fdda429779 | [
"BSD-2-Clause"
] | 1 | 2020-05-04T00:34:41.000Z | 2020-05-04T00:34:41.000Z | pygments/lexers/tnt.py | btashton/pygments | ceaad0372055ed0064121020fea032fdda429779 | [
"BSD-2-Clause"
] | 1 | 2019-03-08T20:01:19.000Z | 2019-03-08T20:01:19.000Z | pygments/lexers/tnt.py | btashton/pygments | ceaad0372055ed0064121020fea032fdda429779 | [
"BSD-2-Clause"
] | 1 | 2019-03-08T19:44:02.000Z | 2019-03-08T19:44:02.000Z | # -*- coding: utf-8 -*-
"""
pygments.lexers.tnt
~~~~~~~~~~~~~~~~~~~
Lexer for Typographic Number Theory.
:copyright: Copyright 2019-2020 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer
from pygments.token import Text, Comment, Operator, Keyword, Name, Number, \
Punctuation, Error
__all__ = ['TNTLexer']
| 37.987903 | 85 | 0.519478 |
13c2bfaf0d362a6c304791ee3c1accf9f548727b | 1,233 | py | Python | contacts/urls.py | cheradenine/Django-CRM | 692572ced050d314c1f880af8b4000c97cbf7440 | [
"MIT"
] | 2 | 2019-08-30T14:42:45.000Z | 2019-09-01T01:49:38.000Z | contacts/urls.py | cheradenine/Django-CRM | 692572ced050d314c1f880af8b4000c97cbf7440 | [
"MIT"
] | 8 | 2020-06-05T20:58:52.000Z | 2022-03-11T23:48:48.000Z | contacts/urls.py | gthreepwood/Django-CRM | 12de7e6c622d9d7483c210212c8b7fe3dbde2739 | [
"MIT"
] | 1 | 2019-05-31T16:06:24.000Z | 2019-05-31T16:06:24.000Z | from django.urls import path
from contacts.views import (
ContactsListView, CreateContactView, ContactDetailView,
UpdateContactView, RemoveContactView,
GetContactsView, AddCommentView, UpdateCommentView,
DeleteCommentView, AddAttachmentsView, DeleteAttachmentsView)
app_name = 'contacts'
urlpatterns = [
path('list/', ContactsListView.as_view(), name='list'),
path('create/', CreateContactView.as_view(), name='add_contact'),
path('<int:pk>/view/', ContactDetailView.as_view(), name="view_contact"),
path('<int:pk>/edit/', UpdateContactView.as_view(), name="edit_contact"),
path('<int:pk>/delete/',
RemoveContactView.as_view(),
name="remove_contact"),
path('get/list/', GetContactsView.as_view(), name="get_contacts"),
path('comment/add/', AddCommentView.as_view(), name="add_comment"),
path('comment/edit/', UpdateCommentView.as_view(), name="edit_comment"),
path('comment/remove/',
DeleteCommentView.as_view(),
name="remove_comment"),
path('attachment/add/',
AddAttachmentsView.as_view(),
name="add_attachment"),
path('attachment/remove/', DeleteAttachmentsView.as_view(),
name="remove_attachment"),
]
| 36.264706 | 77 | 0.691809 |
13c4fe2bf0cd10d5be8344221103967c7cea77fd | 12,883 | py | Python | windows/winobject/network.py | marpie/PythonForWindows | b253bc5873e7d97087ed22f2753b51fc6880ec18 | [
"BSD-3-Clause"
] | 1 | 2018-11-15T11:15:56.000Z | 2018-11-15T11:15:56.000Z | windows/winobject/network.py | killvxk/PythonForWindows | b253bc5873e7d97087ed22f2753b51fc6880ec18 | [
"BSD-3-Clause"
] | null | null | null | windows/winobject/network.py | killvxk/PythonForWindows | b253bc5873e7d97087ed22f2753b51fc6880ec18 | [
"BSD-3-Clause"
] | 1 | 2020-12-25T12:59:10.000Z | 2020-12-25T12:59:10.000Z | import windows
import ctypes
import socket
import struct
from windows import winproxy
import windows.generated_def as gdef
from windows.com import interfaces as cominterfaces
from windows.generated_def.winstructs import *
from windows.generated_def.windef import *
class TCP6Connection(MIB_TCP6ROW_OWNER_PID):
"""A TCP6 socket (connected or listening)"""
def close(self):
raise NotImplementedError("Closing IPV6 connection non implemented")
def __repr__(self):
if not self.established:
return "<TCP IPV6 Listening socket on {0}:{1}>".format(self.local_addr, self.local_port)
return "<TCP IPV6 Connection {0}:{1} -> {2}:{3}>".format(self.local_addr, self.local_port, self.remote_addr, self.remote_port)
def get_MIB_TCPTABLE_OWNER_PID_from_buffer(buffer):
x = windows.generated_def.winstructs.MIB_TCPTABLE_OWNER_PID.from_buffer(buffer)
nb_entry = x.dwNumEntries
return _GENERATED_MIB_TCPTABLE_OWNER_PID.from_buffer(buffer)
def get_MIB_TCP6TABLE_OWNER_PID_from_buffer(buffer):
x = windows.generated_def.winstructs.MIB_TCP6TABLE_OWNER_PID.from_buffer(buffer)
nb_entry = x.dwNumEntries
# Struct _MIB_TCP6TABLE_OWNER_PID definitions
return _GENERATED_MIB_TCP6TABLE_OWNER_PID.from_buffer(buffer)
class Firewall(cominterfaces.INetFwPolicy2):
"""The windows firewall"""
def enabled_for_profile_type(self, profile_type):
enabled = gdef.VARIANT_BOOL()
self.get_FirewallEnabled(profile_type, enabled)
return enabled.value
class FirewallRule(cominterfaces.INetFwRule):
"""A rule of the firewall"""
def __repr__(self):
return u'<{0} "{1}">'.format(type(self).__name__, self.name).encode("ascii", errors='backslashreplace')
class Network(object):
NetFwPolicy2 = windows.com.IID.from_string("E2B3C97F-6AE1-41AC-817A-F6F92166D7DD")
ipv4 = property(lambda self: self._get_tcp_ipv4_sockets())
"""List of TCP IPv4 socket (connection and listening)
:type: [:class:`TCP4Connection`]"""
ipv6 = property(lambda self: self._get_tcp_ipv6_sockets())
"""List of TCP IPv6 socket (connection and listening)
:type: [:class:`TCP6Connection`]
"""
| 28.756696 | 134 | 0.623613 |
13c55ddf22e3a453950de6b6142214790512cd06 | 4,269 | py | Python | LIM_scripts/func_curry.py | Bhare8972/LOFAR-LIM | 89f25be8c02cb8980c2e237da3eaac279d40a06a | [
"MIT"
] | 3 | 2019-04-21T13:13:02.000Z | 2020-10-15T12:44:23.000Z | LIM_scripts/func_curry.py | Bhare8972/LOFAR-LIM | 89f25be8c02cb8980c2e237da3eaac279d40a06a | [
"MIT"
] | null | null | null | LIM_scripts/func_curry.py | Bhare8972/LOFAR-LIM | 89f25be8c02cb8980c2e237da3eaac279d40a06a | [
"MIT"
] | 2 | 2018-11-06T18:34:33.000Z | 2019-04-04T14:16:57.000Z | #!/usr/bin/env python3
# Coded by Massimiliano Tomassoli, 2012.
#
# - Thanks to b49P23TIvg for suggesting that I should use a set operation
# instead of repeated membership tests.
# - Thanks to Ian Kelly for pointing out that
# - "minArgs = None" is better than "minArgs = -1",
# - "if args" is better than "if len(args)", and
# - I should use "isdisjoint".
#
def genCur(func, unique = True, minArgs = None):
""" Generates a 'curried' version of a function. """
return g
if __name__ == "__main__":
# Simple Function.
# NOTE: '<====' means "this line prints to the screen".
# Example 1.
f = cur(func) # f is a "curried" version of func
c1 = f(1)
c2 = c1(2, d = 4) # Note that c is still unbound
c3 = c2(3)(f = 6)(e = 5) # now c = 3
c3() # () forces the evaluation <====
# it prints "1 2 3 4 5 6 100"
c4 = c2(30)(f = 60)(e = 50) # now c = 30
c4() # () forces the evaluation <====
# it prints "1 2 30 4 50 60 100"
print("\n------\n")
# Example 2.
f = curr(func) # f is a "curried" version of func
# curr = cur with possibly repeated
# keyword args
c1 = f(1, 2)(3, 4)
c2 = c1(e = 5)(f = 6)(e = 10)() # ops... we repeated 'e' because we <====
# changed our mind about it!
# again, () forces the evaluation
# it prints "1 2 3 4 10 6 100"
print("\n------\n")
# Example 3.
f = cur(func, 6) # forces the evaluation after 6 arguments
c1 = f(1, 2, 3) # num args = 3
c2 = c1(4, f = 6) # num args = 5
c3 = c2(5) # num args = 6 ==> evalution <====
# it prints "1 2 3 4 5 6 100"
c4 = c2(5, g = -1) # num args = 7 ==> evaluation <====
# we can specify more than 6 arguments, but
# 6 are enough to force the evaluation
# it prints "1 2 3 4 5 6 -1"
print("\n------\n")
# Example 4.
printTree(func)
print("\n------\n")
stress(cur(f2), 100) | 38.809091 | 84 | 0.444601 |
13c5d0054209f9afb389d03f1764cab446c01a96 | 742 | py | Python | src/messages.py | Ewpratten/chat | 4cc8461e442b6530b7874f234b1a2261f3db8456 | [
"MIT"
] | null | null | null | src/messages.py | Ewpratten/chat | 4cc8461e442b6530b7874f234b1a2261f3db8456 | [
"MIT"
] | null | null | null | src/messages.py | Ewpratten/chat | 4cc8461e442b6530b7874f234b1a2261f3db8456 | [
"MIT"
] | null | null | null | greeting = """
--------------- BEGIN SESSION ---------------
You have connected to a chat server. Welcome!
:: About
Chat is a small piece of server software
written by Evan Pratten to allow people to
talk to each other from any computer as long
as it has an internet connection. (Even an
arduino!). Check out the project at:
https://github.com/Ewpratten/chat
:: Disclaimer
While chatting, keep in mind that, if there
is a rule or regulation about privacy, this
server does not follow it. All data is sent
to and from this server over a raw TCP socket
and data is temporarily stored in plaintext
while the server handles message broadcasting
Now that's out of the way so, happy chatting!
---------------------------------------------
""" | 32.26087 | 45 | 0.690027 |
13c625629058a335547038a4cdc3550a5d9f78a2 | 3,572 | py | Python | Prediction.py | khayam-hafezi/CRNN-keras-persian | 3f99838e5b3b0e0ca79899e25b0648940b7fdfac | [
"MIT"
] | null | null | null | Prediction.py | khayam-hafezi/CRNN-keras-persian | 3f99838e5b3b0e0ca79899e25b0648940b7fdfac | [
"MIT"
] | null | null | null | Prediction.py | khayam-hafezi/CRNN-keras-persian | 3f99838e5b3b0e0ca79899e25b0648940b7fdfac | [
"MIT"
] | null | null | null | import cv2
import itertools, os, time
import numpy as np
from Model import get_Model
from parameter import letters
import argparse
from keras import backend as K
K.set_learning_phase(0)
Region = {"A": " ", "B": " ", "C": " ", "D": " ", "E": " ", "F": " ",
"G": " ", "H": " ", "I": " ", "J": " ", "K": " ", "L": " ",
"M": " ", "N": " ", "O": " ", "P": " "}
Hangul = {"dk": "", "dj": "", "dh": "", "dn": "", "qk": "", "qj": "", "qh": "", "qn": "",
"ek": "", "ej": "", "eh": "", "en": "", "rk": "", "rj": "", "rh": "", "rn": "",
"wk": "", "wj": "", "wh": "", "wn": "", "ak": "", "aj": "", "ah": "", "an": "",
"sk": "", "sj": "", "sh": "", "sn": "", "fk": "", "fj": "", "fh": "", "fn": "",
"tk": "", "tj": "", "th": "", "tn": "", "gj": ""}
parser = argparse.ArgumentParser()
parser.add_argument("-w", "--weight", help="weight file directory",
type=str, default="models/weights.best.hdf5")
parser.add_argument("-t", "--test_img", help="Test image directory",
type=str, default="./DB/test/")
args = parser.parse_args()
# Get CRNN model
model = get_Model(training=False)
try:
model.load_weights(args.weight)
print("...Previous weight data...")
except:
raise Exception("No weight file!")
test_dir =args.test_img
test_imgs = os.listdir(args.test_img)
total = 0
acc = 0
letter_total = 0
letter_acc = 0
start = time.time()
for test_img in test_imgs:
img = cv2.imread(test_dir + test_img, cv2.IMREAD_GRAYSCALE)
img_pred = img.astype(np.float32)
img_pred = cv2.resize(img_pred, (128, 64))
img_pred = (img_pred / 255.0) * 2.0 - 1.0
img_pred = img_pred.T
img_pred = np.expand_dims(img_pred, axis=-1)
img_pred = np.expand_dims(img_pred, axis=0)
net_out_value = model.predict(img_pred)
pred_texts = decode_label(net_out_value)
for i in range(min(len(pred_texts), len(test_img[0:-4]))):
if pred_texts[i] == test_img[i]:
letter_acc += 1
letter_total += max(len(pred_texts), len(test_img[0:-4]))
predOk = "True"
if pred_texts == test_img[0:-4]:
acc += 1
else:
predOk = "False"
total += 1
# print('Predicted: %s / True: %s / net_out_value: %s / ' % (label_to_hangul(pred_texts), label_to_hangul(test_img[0:-4])))
print('Predicted: %s / True: %s / predOk: %s ' % (pred_texts, test_img[0:-4], predOk ))
# cv2.rectangle(img, (0,0), (150, 30), (0,0,0), -1)
# cv2.putText(img, pred_texts, (5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,255,255),2)
#cv2.imshow("q", img)
#if cv2.waitKey(0) == 27:
# break
#cv2.destroyAllWindows()
end = time.time()
total_time = (end - start)
print("Time : ",total_time / total)
print("ACC : ", acc / total)
print("letter ACC : ", letter_acc / letter_total)
| 32.472727 | 129 | 0.555151 |
13c705bea50bc8d33e8f2c2e57d0e51683dbf67b | 8,038 | py | Python | torcharrow/_interop.py | OswinC/torcharrow | 45a57c45afeffee488c51e3387179292b3504a6c | [
"BSD-3-Clause"
] | null | null | null | torcharrow/_interop.py | OswinC/torcharrow | 45a57c45afeffee488c51e3387179292b3504a6c | [
"BSD-3-Clause"
] | null | null | null | torcharrow/_interop.py | OswinC/torcharrow | 45a57c45afeffee488c51e3387179292b3504a6c | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
from typing import List, Optional, cast
# Skipping analyzing 'numpy': found module but no type hints or library stubs
import numpy as np # type: ignore
import numpy.ma as ma # type: ignore
# Skipping analyzing 'pandas': found module but no type hints or library stubs
import pandas as pd # type: ignore
import pyarrow as pa # type: ignore
import torcharrow.dtypes as dt
from torcharrow import Scope
def from_arrow_table(
table,
dtype: Optional[dt.DType] = None,
columns: Optional[List[str]] = None,
scope=None,
device="",
):
""" "
Convert arrow table to a torcharrow dataframe.
"""
scope = scope or Scope.default
device = device or scope.device
assert isinstance(table, pa.Table)
if dtype is not None:
assert dt.is_struct(dtype)
dtype = cast(dt.Struct, dtype)
res = {}
for f in dtype.fields:
chunked_array = table.column(f.name)
pydata = chunked_array.to_pylist()
res[f.name] = scope.Column(pydata, f.dtype)
return scope.DataFrame(res, device=device)
else:
res = {}
table = table.select(columns) if columns is not None else table
for n in table.column_names:
chunked_array = table.column(n)
pydata = chunked_array.to_pylist()
res[n] = scope.Column(
pydata,
dtype=_arrowtype_to_dtype(
table.schema.field(n).type, table.column(n).null_count > 0
),
)
return scope.DataFrame(res, device=device)
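# Usage sketch (illustrative values):
#   table = pa.table({"a": [1, 2, 3], "b": [0.1, None, 0.3]})
#   df = from_arrow_table(table, columns=["a", "b"])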
def from_pandas_dataframe(
df,
dtype: Optional[dt.DType] = None,
columns: Optional[List[str]] = None,
scope=None,
device="",
):
"""
Convert pandas dataframe to torcharrow dataframe (drops indices).
Parameters
----------
df : Pandas dataframe
dtype : dtype, default None
Data type to force, if None will automatically infer.
columns : array-like
List of column names to extract from df.
scope : Scope or None
Scope to use, or None for default scope.
device : str or ""
Device to use, or default if blank.
Examples
--------
>>> import pandas as pd
>>> import torcharrow as ta
>>> pdf = pd.DataFrame({'a': [0, 1, 2, 3],'b': [0.1, 0.2, None, 0.3]})
>>> gdf = ta.from_pandas_dataframe(pdf)
>>> gdf
index a b
------- --- ---
0 0 0.1
1 1 0.2
2 2
3 3 0.3
dtype: Struct([Field('a', int64), Field('b', Float64(nullable=True))]), count: 4, null_count: 0
"""
scope = scope or Scope.default
device = device or scope.device
if dtype is not None:
assert dt.is_struct(dtype)
dtype = cast(dt.Struct, dtype)
res = {}
for f in dtype.fields:
# this shows that Column shoud also construct Dataframes!
res[f.name] = from_pandas_series(
pd.Series(df[f.name]), f.dtype, scope=scope
)
return scope.Frame(res, dtype=dtype, device=device)
else:
res = {}
for n in df.columns:
if columns is None or n in columns:
res[n] = from_pandas_series(pd.Series(df[n]), scope=scope)
return scope.Frame(res, device=device)
def from_arrow_array(array, dtype=None, scope=None, device=""):
""" "
Convert arrow array to a torcharrow column.
"""
scope = scope or Scope.default
device = device or scope.device
assert isinstance(array, pa.Array)
pydata = _arrow_scalar_to_py(array)
if dtype is not None:
assert not dt.is_struct(dtype)
return scope.Column(pydata, dtype, device=device)
else:
return scope.Column(
pydata,
dtype=_arrowtype_to_dtype(array.type, array.null_count > 0),
device=device,
)
def from_pandas_series(series, dtype=None, scope=None, device=""):
""" "
Convert pandas series array to a torcharrow column (drops indices).
"""
scope = scope or Scope.default
device = device or scope.device
return from_numpy(series.to_numpy(), dtype, scope, device)
def from_numpy(array, dtype, scope=None, device=""):
"""
Convert 1dim numpy array to a torcharrow column (zero copy).
"""
scope = scope or Scope.default
device = device or scope.device
if isinstance(array, ma.core.MaskedArray) and array.ndim == 1:
return _from_numpy_ma(array.data, array.mask, dtype, scope, device)
elif isinstance(array, np.ndarray) and array.ndim == 1:
return _from_numpy_nd(array, dtype, scope, device)
else:
raise TypeError(f"cannot convert numpy array of type {array.dtype}")
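# Usage sketch (illustrative; assumes dt.int64 as the target dtype): masked
# entries are carried over as nulls, e.g.
#   col = from_numpy(ma.array([1, 2, 3], mask=[False, True, False]), dt.int64)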
# def _column_without_nan(series, dtype):
# if dtype is None or is_floating(dtype):
# for i in series:
# if isinstance(i, float) and np.isnan(i):
# yield None
# else:
# yield i
# else:
# for i in series:
# yield i
| 31.155039 | 99 | 0.61682 |
13c7d55115d132308c18e527238726863764f8de | 3,883 | py | Python | research/gan/image_compression/eval.py | jdavidagudelo/tensorflow-models | 6f019beec73b01861363bf717706e27f4210b979 | [
"Apache-2.0"
] | 1 | 2021-05-17T01:42:29.000Z | 2021-05-17T01:42:29.000Z | research/gan/image_compression/eval.py | jdavidagudelo/tensorflow-models | 6f019beec73b01861363bf717706e27f4210b979 | [
"Apache-2.0"
] | null | null | null | research/gan/image_compression/eval.py | jdavidagudelo/tensorflow-models | 6f019beec73b01861363bf717706e27f4210b979 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluates a TFGAN trained compression model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
import tensorflow as tf
from research.gan.image_compression import data_provider
from research.gan.image_compression import networks
from research.gan.image_compression import summaries
FLAGS = tf.app.flags.FLAGS
flags = tf.app.flags
flags.DEFINE_string('master', '', 'Name of the TensorFlow master to use.')
flags.DEFINE_string('checkpoint_dir', '/tmp/compression/',
'Directory where the model was written to.')
flags.DEFINE_string('eval_dir', '/tmp/compression/',
'Directory where the results are saved to.')
flags.DEFINE_integer('max_number_of_evaluations', None,
'Number of times to run evaluation. If `None`, run '
'forever.')
flags.DEFINE_string('dataset_dir', 'testdata', 'Location of data.')
# Compression-specific flags.
flags.DEFINE_integer('batch_size', 32, 'The number of images in each batch.')
flags.DEFINE_integer('patch_size', 32, 'The size of the patches to train on.')
flags.DEFINE_integer('bits_per_patch', 1230,
'The number of bits to produce per patch.')
flags.DEFINE_integer('model_depth', 64,
'Number of filters for compression model')
if __name__ == '__main__':
app.run(_)
| 38.83 | 80 | 0.702807 |
13c974d988a5a072e9adfbe93d6a9ef5022a8ab3 | 1,712 | py | Python | source/dump_query_results.py | CheyenneNS/metrics | cfeeac6d01d99679897a998b193d630ada169c61 | [
"MIT"
] | null | null | null | source/dump_query_results.py | CheyenneNS/metrics | cfeeac6d01d99679897a998b193d630ada169c61 | [
"MIT"
] | null | null | null | source/dump_query_results.py | CheyenneNS/metrics | cfeeac6d01d99679897a998b193d630ada169c61 | [
"MIT"
] | null | null | null | #!/usr/local/bin/python
import os
import mysql.connector as mysql
metrics_mysql_password = os.environ['METRICS_MYSQL_PWD']
sql_host = os.environ['SQL_HOST']
metrics = os.environ['QUERY_ON']
def dump_query_results():
"""
This is a simple SQL table dump of a given query so we can supply users with custom tables.
Note that the SQL query itself and column headers portion need to be changed if you want to change
the query/results. Otherwise it is good to go.
It can be called simply with the bin shell script.
Read the README at the top level for an example.
"""
#connect to mysql
db_connection = mysql.connect(
host = sql_host,#"mysql1", #"localhost",
user = "metrics", #"root",
passwd = metrics_mysql_password,
database = "metrics" #"datacamp"
)
cursor = db_connection.cursor()
query = "use "+metrics
cursor.execute(query)
#CHANGE QUERY HERE
query = "select username, display_name, email, orcid, kb_internal_user, institution, country, signup_date, last_signin_date from user_info order by signup_date"
#CHANGE COLUMN HEADERS HERE TO MATCH QUERY HEADERS
print("username\tdisplay_name\temail\torcid\tkb_internal_user\tinstitution\tcountry\tsignup_date\tlast_signin_date")
cursor.execute(query)
row_values = list()
for (row_values) in cursor:
temp_string = ""
for i in range(len(row_values) - 1):
if row_values[i] is not None:
temp_string += str(row_values[i])
temp_string += "\t"
if row_values[-1] is not None:
temp_string += str(row_values[-1])
print(temp_string)
return 1
dump_query_results()
| 33.568627 | 164 | 0.675234 |
13c9e31c590bfc0b0cb4adceaddaadb36e84b31a | 538 | py | Python | desktop_local_tests/windows/test_windows_packet_capture_disrupt_force_public_dns_servers.py | UAEKondaya1/expressvpn_leak_testing | 9e4cee899ac04f7820ac351fa55efdc0c01370ba | [
"MIT"
] | 219 | 2017-12-12T09:42:46.000Z | 2022-03-13T08:25:13.000Z | desktop_local_tests/windows/test_windows_packet_capture_disrupt_force_public_dns_servers.py | UAEKondaya1/expressvpn_leak_testing | 9e4cee899ac04f7820ac351fa55efdc0c01370ba | [
"MIT"
] | 11 | 2017-12-14T08:14:51.000Z | 2021-08-09T18:37:45.000Z | desktop_local_tests/windows/test_windows_packet_capture_disrupt_force_public_dns_servers.py | UAEKondaya1/expressvpn_leak_testing | 9e4cee899ac04f7820ac351fa55efdc0c01370ba | [
"MIT"
] | 45 | 2017-12-14T07:26:36.000Z | 2022-03-11T09:36:56.000Z | from desktop_local_tests.local_packet_capture_test_case_with_disrupter import LocalPacketCaptureTestCaseWithDisrupter
from desktop_local_tests.windows.windows_dns_force_public_dns_servers_disrupter import WindowsDNSForcePublicDNSServersDisrupter
| 53.8 | 127 | 0.877323 |
13ca0867e3b5094c9f6e2eb05d9af7e3c93bd96a | 16,159 | py | Python | kivy/loader.py | geojeff/kivy | 25ab20e5b0e87269531abe1f8cc76bf270bcc755 | [
"MIT"
] | 1 | 2017-11-15T08:59:23.000Z | 2017-11-15T08:59:23.000Z | kivy/loader.py | 5y/kivy | 6bee66946f5434ca92921a8bc9559d82ec955896 | [
"MIT"
] | null | null | null | kivy/loader.py | 5y/kivy | 6bee66946f5434ca92921a8bc9559d82ec955896 | [
"MIT"
] | 3 | 2015-07-18T11:03:59.000Z | 2018-03-17T01:32:42.000Z | '''
Asynchronous data loader
========================
This is the Asynchronous Loader. You can use it to load an image
and use it, even if data are not yet available. You must specify a default
loading image to use such a loader::
from kivy import *
image = Loader.image('mysprite.png')
You can also load an image from a URL::
image = Loader.image('http://mysite.com/test.png')
If you want to change the default loading image, you can do::
Loader.loading_image = Image('another_loading.png')
Tweaking the asynchronous loader
--------------------------------
.. versionadded:: 1.6.0
You can now tweak the loader to have a better user experience or more
performance, depending on the images you're going to load. Take a look at the
parameters:
- :data:`Loader.num_workers` - define the number of threads to start for
loading images
- :data:`Loader.max_upload_per_frame` - define the maximum number of image
  uploads to the GPU per frame.
'''
__all__ = ('Loader', 'LoaderBase', 'ProxyImage')
from kivy import kivy_data_dir
from kivy.logger import Logger
from kivy.clock import Clock
from kivy.cache import Cache
from kivy.core.image import ImageLoader, Image
from kivy.compat import PY2
from collections import deque
from time import sleep
from os.path import join
from os import write, close, unlink, environ
import threading
# Register a cache for loader
Cache.register('kv.loader', limit=500, timeout=60)
#
# Loader implementation
#
if 'KIVY_DOC' in environ:
Loader = None
else:
#
# Try to use pygame as our first choice for loader
#
from kivy.compat import queue
from threading import Thread
Loader = LoaderThreadPool()
Logger.info('Loader: using a thread pool of {} workers'.format(
Loader.num_workers))
| 31.684314 | 80 | 0.590692 |
13ca207f4f2fb0793c69bd95418294b71cd89e2c | 467 | py | Python | Season 01 - Intro to Python/Episode 13 - Join.py | Pythobit/Python-tutorial | b0743eaa9c237c3578131ead1b3f2c295f11b7ee | [
"MIT"
] | 3 | 2021-02-19T18:33:00.000Z | 2021-08-03T14:56:50.000Z | Season 01 - Intro to Python/Episode 13 - Join.py | barawalojas/Python-tutorial | 3f4b2b073e421888b3d62ff634658317d9abcb9b | [
"MIT"
] | 1 | 2021-07-10T14:37:57.000Z | 2021-07-20T09:51:39.000Z | Season 01 - Intro to Python/Episode 13 - Join.py | barawalojas/Python-tutorial | 3f4b2b073e421888b3d62ff634658317d9abcb9b | [
"MIT"
] | 1 | 2021-08-02T05:39:38.000Z | 2021-08-02T05:39:38.000Z | # 13. Join
# it allows printing a list a bit better
friends = ['Pythobit','boy','Pythoman']
print(f'My friends are {friends}.') # Output - My friends are ['Pythobit', 'boy', 'Pythoman'].
# So, the Output needs to be a bit clearer.
friends = ['Pythobit','boy','Pythoman']
friend = ', '.join(friends)
print(f'My friends are {friend}') # Output - My friends are Pythobit, boy, Pythoman
# Here a comma and a space (', ') are used as the separator, but you can use anything.
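# Any string works as the separator. For example, ' and '.join(friends)
# would give 'Pythobit and boy and Pythoman'.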
| 38.916667 | 98 | 0.663812 |
13caf57909dc254d637b57702b6b442c435e3b48 | 2,327 | py | Python | buildsettings.py | randomizax/polygon-label | 5091bd54aee5166d418b240f34d7a5c336685c06 | [
"MIT"
] | null | null | null | buildsettings.py | randomizax/polygon-label | 5091bd54aee5166d418b240f34d7a5c336685c06 | [
"MIT"
] | null | null | null | buildsettings.py | randomizax/polygon-label | 5091bd54aee5166d418b240f34d7a5c336685c06 | [
"MIT"
] | null | null | null | # settings file for builds.
# if you want to have custom builds, copy this file to "localbuildsettings.py" and make changes there.
# possible fields:
# resourceBaseUrl - optional - the URL base for external resources (all resources embedded in standard IITC)
# distUrlBase - optional - the base URL to use for update checks
# buildMobile - optional - if set, mobile builds are built with 'ant'. requires the Android SDK and appropriate mobile/local.properties file configured
# preBuild - optional - an array of strings to run as commands, via os.system, before building the scripts
# postBuild - optional - an array of string to run as commands, via os.system, after all builds are complete
buildSettings = {
# local: use this build if you're not modifying external resources
# no external resources allowed - they're not needed any more
'randomizax': {
'resourceUrlBase': None,
'distUrlBase': 'https://randomizax.github.io/polygon-label',
},
# local8000: if you need to modify external resources, this build will load them from
# the web server at http://0.0.0.0:8000/dist
# (This shouldn't be required any more - all resources are embedded. but, it remains just in case some new feature
# needs external resources)
'local8000': {
'resourceUrlBase': 'http://0.0.0.0:8000/dist',
'distUrlBase': None,
},
# mobile: default entry that also builds the mobile .apk
# you will need to have the android-sdk installed, and the file mobile/local.properties created as required
'mobile': {
'resourceUrlBase': None,
'distUrlBase': None,
'buildMobile': 'debug',
},
# if you want to publish your own fork of the project, and host it on your own web site
# create a localbuildsettings.py file containing something similar to this
# note: Firefox+Greasemonkey require the distUrlBase to be "https" - they won't check for updates on regular "http" URLs
#'example': {
# 'resourceBaseUrl': 'http://www.example.com/iitc/dist',
# 'distUrlBase': 'https://secure.example.com/iitc/dist',
#},
}
# defaultBuild - the name of the default build to use if none is specified on the build.py command line
# (in here as an example - it only works in localbuildsettings.py)
#defaultBuild = 'local'
| 42.309091 | 151 | 0.701762 |
13cbb884947e5c5ee43f164c1fde11e81811776b | 4,399 | py | Python | osaka/storage/sftp.py | riverma/osaka | f9ed386936500303c629d7213d91215085bcf346 | [
"Apache-2.0"
] | 2 | 2018-05-08T03:13:49.000Z | 2022-02-09T08:48:06.000Z | osaka/storage/sftp.py | riverma/osaka | f9ed386936500303c629d7213d91215085bcf346 | [
"Apache-2.0"
] | 6 | 2019-02-06T19:12:09.000Z | 2022-02-08T04:29:49.000Z | osaka/storage/sftp.py | riverma/osaka | f9ed386936500303c629d7213d91215085bcf346 | [
"Apache-2.0"
] | 12 | 2018-04-08T12:58:29.000Z | 2022-03-31T18:35:53.000Z | from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import int
from future import standard_library
standard_library.install_aliases()
import os
import os.path
import stat
import urllib.parse
import paramiko
import traceback
import osaka.utils
"""
A backend used to handle sftp using paramiko
@author starchmd
"""
| 30.130137 | 90 | 0.562855 |
13cc4a79cdbfb09ff64440ffca1bacc5cc651798 | 4,192 | py | Python | thesis/pettingzoo/butterfly/cooperative_pong/cake_paddle.py | heavenlysf/thesis | 646553c45860f337c91a48ab7f666a174784472f | [
"MIT"
] | null | null | null | thesis/pettingzoo/butterfly/cooperative_pong/cake_paddle.py | heavenlysf/thesis | 646553c45860f337c91a48ab7f666a174784472f | [
"MIT"
] | null | null | null | thesis/pettingzoo/butterfly/cooperative_pong/cake_paddle.py | heavenlysf/thesis | 646553c45860f337c91a48ab7f666a174784472f | [
"MIT"
] | null | null | null | import os
os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = "hide"
import pygame
RENDER_RATIO = 2
| 34.644628 | 77 | 0.530057 |
13cd2bb4addf837c7a09ba721bc230b691ca3e1b | 2,080 | py | Python | src/internal_representation_analysis/decoder/StateDataset.py | aidkilda/understanding-drl-navigation | 0d637c2390a935ec1182d4f2d5165644d98d6404 | [
"MIT"
] | null | null | null | src/internal_representation_analysis/decoder/StateDataset.py | aidkilda/understanding-drl-navigation | 0d637c2390a935ec1182d4f2d5165644d98d6404 | [
"MIT"
] | null | null | null | src/internal_representation_analysis/decoder/StateDataset.py | aidkilda/understanding-drl-navigation | 0d637c2390a935ec1182d4f2d5165644d98d6404 | [
"MIT"
] | null | null | null | import random
from internal_representation_analysis.network import ActorCriticFFNetwork
from internal_representation_analysis.scene_loader import THORDiscreteEnvironment as Environment
from internal_representation_analysis.constants import MINI_BATCH_SIZE
| 38.518519 | 96 | 0.669712 |
13cd2ed4d981d4b892a318dfe3960eb2c118e4ce | 3,147 | py | Python | test_dataset_model.py | ferrine/PerceptualSimilarity | 2ff66e86b12dbfbc337991def71b09e3b86d4b12 | [
"BSD-2-Clause"
] | null | null | null | test_dataset_model.py | ferrine/PerceptualSimilarity | 2ff66e86b12dbfbc337991def71b09e3b86d4b12 | [
"BSD-2-Clause"
] | null | null | null | test_dataset_model.py | ferrine/PerceptualSimilarity | 2ff66e86b12dbfbc337991def71b09e3b86d4b12 | [
"BSD-2-Clause"
] | null | null | null | import numpy as np
from models import dist_model as dm
from data import data_loader as dl
import argparse
from IPython import embed
parser = argparse.ArgumentParser()
parser.add_argument("--dataset_mode", type=str, default="2afc", help="[2afc,jnd]")
parser.add_argument(
"--datasets",
type=str,
nargs="+",
default=[
"val/traditional",
"val/cnn",
"val/superres",
"val/deblur",
"val/color",
"val/frameinterp",
],
help="datasets to test - for jnd mode: [val/traditional],[val/cnn]; for 2afc mode: [train/traditional],[train/cnn],[train/mix],[val/traditional],[val/cnn],[val/color],[val/deblur],[val/frameinterp],[val/superres]",
)
parser.add_argument(
"--model",
type=str,
default="net-lin",
help="distance model type [net-lin] for linearly calibrated net, [net] for off-the-shelf network, [l2] for euclidean distance, [ssim] for Structured Similarity Image Metric",
)
parser.add_argument(
"--net",
type=str,
default="alex",
help="[squeeze], [alex], or [vgg] for network architectures",
)
parser.add_argument(
"--colorspace",
type=str,
default="Lab",
help="[Lab] or [RGB] for colorspace to use for l2, ssim model types",
)
parser.add_argument(
"--batch_size", type=int, default=50, help="batch size to test image patches in"
)
parser.add_argument("--use_gpu", action="store_true", help="turn on flag to use GPU")
parser.add_argument(
"--model_path",
type=str,
default=None,
help="location of model, will default to ./weights/v[version]/[net_name].pth",
)
parser.add_argument(
"--from_scratch", action="store_true", help="model was initialized from scratch"
)
parser.add_argument(
"--train_trunk", action="store_true", help="model trunk was trained/tuned"
)
parser.add_argument(
"--version",
type=str,
default="0.1",
help="v0.1 is latest, v0.0 was original release",
)
opt = parser.parse_args()
if opt.model in ["l2", "ssim"]:
opt.batch_size = 1
# initialize model
model = dm.DistModel()
# model.initialize(model=opt.model,net=opt.net,colorspace=opt.colorspace,model_path=opt.model_path,use_gpu=opt.use_gpu)
model.initialize(
model=opt.model,
net=opt.net,
colorspace=opt.colorspace,
model_path=opt.model_path,
use_gpu=opt.use_gpu,
pnet_rand=opt.from_scratch,
pnet_tune=opt.train_trunk,
version=opt.version,
)
if opt.model in ["net-lin", "net"]:
print("Testing model [%s]-[%s]" % (opt.model, opt.net))
elif opt.model in ["l2", "ssim"]:
print("Testing model [%s]-[%s]" % (opt.model, opt.colorspace))
# embed()
# initialize data loader
for dataset in opt.datasets:
data_loader = dl.CreateDataLoader(
dataset, dataset_mode=opt.dataset_mode, batch_size=opt.batch_size
)
# evaluate model on data
if opt.dataset_mode == "2afc":
(score, results_verbose) = dm.score_2afc_dataset(data_loader, model.forward)
elif opt.dataset_mode == "jnd":
(score, results_verbose) = dm.score_jnd_dataset(data_loader, model.forward)
# print results
print(" Dataset [%s]: %.2f" % (dataset, 100.0 * score))
| 30.553398 | 218 | 0.67048 |