blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7e3544a76de5495c3cd0761fe45aa1f6c3227d1c | d66818f4b951943553826a5f64413e90120e1fae | /hackerearth/Algorithms/Foo and Exams/test.py | 5f769f75d7e391b7e97605e6f823c1e48e0947d1 | [
"MIT"
] | permissive | HBinhCT/Q-project | 0f80cd15c9945c43e2e17072416ddb6e4745e7fa | 19923cbaa3c83c670527899ece5c3ad31bcebe65 | refs/heads/master | 2023-08-30T08:59:16.006567 | 2023-08-29T15:30:21 | 2023-08-29T15:30:21 | 247,630,603 | 8 | 1 | MIT | 2020-07-22T01:20:23 | 2020-03-16T06:48:02 | Python | UTF-8 | Python | false | false | 548 | py | import io
import unittest
from contextlib import redirect_stdout
from unittest.mock import patch
class TestQ(unittest.TestCase):
@patch('builtins.input', side_effect=[
'2',
'2 2 2 2 10',
'2 3 5 7 1000',
])
def test_case_0(self, input_mock=None):
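        # `solution` is imported (and therefore executed) with stdin patched;
        # its stdout is captured so the printed answers can be asserted.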
text_trap = io.StringIO()
with redirect_stdout(text_trap):
import solution
        self.assertEqual(text_trap.getvalue(),
                         '1\n' +
                         '7\n')
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
9cf33f7b6e47a4d3933a7bb52923ed6a13dbdcbb | 7b437e095068fb3f615203e24b3af5c212162c0d | /enaml/wx/wx_factories.py | ff75d3f1abc7037597e7f21f73d30b03aa6bd316 | [
"BSD-3-Clause"
] | permissive | ContinuumIO/enaml | d8200f97946e5139323d22fba32c05231c2b342a | 15c20b035a73187e8e66fa20a43c3a4372d008bd | refs/heads/master | 2023-06-26T16:16:56.291781 | 2013-03-26T21:13:52 | 2013-03-26T21:13:52 | 9,047,832 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,135 | py | #------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
def action_factory():
from .wx_action import WxAction
return WxAction
def action_group_factory():
from .wx_action_group import WxActionGroup
return WxActionGroup
def calendar_factory():
from .wx_calendar import WxCalendar
return WxCalendar
def check_box_factory():
from .wx_check_box import WxCheckBox
return WxCheckBox
def combo_box_factory():
from .wx_combo_box import WxComboBox
return WxComboBox
def container_factory():
from .wx_container import WxContainer
return WxContainer
def date_selector_factory():
from .wx_date_selector import WxDateSelector
return WxDateSelector
# def datetime_selector_factory():
# from .wx_datetime_selector import WxDatetimeSelector
# return WxDatetimeSelector
def dock_pane_factory():
from .wx_dock_pane import WxDockPane
return WxDockPane
def field_factory():
from .wx_field import WxField
return WxField
def group_box_factory():
from .wx_group_box import WxGroupBox
return WxGroupBox
def html_factory():
from .wx_html import WxHtml
return WxHtml
# def image_view_factory():
# from .wx_image_view import WxImageView
# return WxImageView
def label_factory():
from .wx_label import WxLabel
return WxLabel
def main_window_factory():
from .wx_main_window import WxMainWindow
return WxMainWindow
def menu_factory():
from .wx_menu import WxMenu
return WxMenu
def menu_bar_factory():
from .wx_menu_bar import WxMenuBar
return WxMenuBar
def mpl_canvas_factory():
from .wx_mpl_canvas import WxMPLCanvas
return WxMPLCanvas
def notebook_factory():
from .wx_notebook import WxNotebook
return WxNotebook
def page_factory():
from .wx_page import WxPage
return WxPage
def push_button_factory():
from .wx_push_button import WxPushButton
return WxPushButton
def progress_bar_factory():
from .wx_progress_bar import WxProgressBar
return WxProgressBar
def radio_button_factory():
from .wx_radio_button import WxRadioButton
return WxRadioButton
def scroll_area_factory():
from .wx_scroll_area import WxScrollArea
return WxScrollArea
def slider_factory():
from .wx_slider import WxSlider
return WxSlider
def spin_box_factory():
from .wx_spin_box import WxSpinBox
return WxSpinBox
def split_item_factory():
from .wx_split_item import WxSplitItem
return WxSplitItem
def splitter_factory():
from .wx_splitter import WxSplitter
return WxSplitter
# def text_editor_factory():
# from .wx_text_editor import WxTextEditor
# return WxTextEditor
def tool_bar_factory():
from .wx_tool_bar import WxToolBar
return WxToolBar
def window_factory():
from .wx_window import WxWindow
return WxWindow
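# Registry mapping Enaml declarative names to wx widget factories. Each
# factory defers its wx import until the widget is first requested, so
# widgets that are never used are never imported.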
WX_FACTORIES = {
'Action': action_factory,
'ActionGroup': action_group_factory,
'Calendar': calendar_factory,
'CheckBox': check_box_factory,
'ComboBox': combo_box_factory,
'Container': container_factory,
'DateSelector': date_selector_factory,
'DockPane': dock_pane_factory,
'Field': field_factory,
'GroupBox': group_box_factory,
'Html': html_factory,
'Label': label_factory,
'MainWindow': main_window_factory,
'Menu': menu_factory,
'MenuBar': menu_bar_factory,
'MPLCanvas': mpl_canvas_factory,
'Notebook': notebook_factory,
'Page': page_factory,
'PushButton': push_button_factory,
'ProgressBar': progress_bar_factory,
'RadioButton': radio_button_factory,
'ScrollArea': scroll_area_factory,
'Slider': slider_factory,
'SpinBox': spin_box_factory,
'SplitItem': split_item_factory,
'Splitter': splitter_factory,
'ToolBar': tool_bar_factory,
'Window': window_factory,
}
| [
"[email protected]"
] | |
d1989cdd6717ce76b1db0189d8bdca041eb9cf69 | c99427245fdb9cb3f1d9f788963759664885b6ec | /tests/models/test_driver_response.py | 7811b95f8930a672138a5f21d99db91fb41da588 | [] | no_license | QualiSystems/cloudshell-shell-connectivity-flow | 98cf33e49fbdef147e37dbda0179b76400492e36 | c2a1bb8589b25a62dd7c20f7ef35c7f2f8d5aa1c | refs/heads/master | 2023-08-22T22:48:28.051220 | 2023-08-17T18:04:19 | 2023-08-17T18:27:45 | 218,023,805 | 0 | 1 | null | 2023-08-17T18:27:46 | 2019-10-28T10:42:09 | Python | UTF-8 | Python | false | false | 1,468 | py | import pytest
from cloudshell.shell.flows.connectivity.models.driver_response import (
ConnectivityActionResult,
DriverResponseRoot,
)
@pytest.mark.parametrize(
("success", "msg"), ((True, "success msg"), (False, "error msg"))
)
def test_connectivity_action_result(success, msg, action_model):
if success:
result = ConnectivityActionResult.success_result(action_model, msg)
assert result.infoMessage == msg
assert result.errorMessage == ""
else:
result = ConnectivityActionResult.fail_result(action_model, msg)
assert result.infoMessage == ""
assert result.errorMessage == msg
assert result.success is success
assert result.actionId == action_model.action_id
assert result.type == action_model.type.value
assert result.updatedInterface == action_model.action_target.name
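# prepare_response must serialize with the camelCase keys the CloudShell
# connectivity contract expects (asserted verbatim below).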
def test_prepare_response(action_model):
result = ConnectivityActionResult.success_result(action_model, "success msg")
response = DriverResponseRoot.prepare_response([result])
assert response.driverResponse.actionResults[0] == result
assert response.json() == (
'{"driverResponse": {"actionResults": ['
'{"actionId": "96582265-2728-43aa-bc97-cefb2457ca44_0900c4b5-0f90-42e3-b495", '
'"type": "removeVlan", '
'"updatedInterface": "centos", '
'"infoMessage": "success msg", '
'"errorMessage": "", '
'"success": true'
"}]}}"
)
| [
"[email protected]"
] | |
c3112deb23ba50edf3fb5f4abc9447ee7a5e1ffb | 818432c7fdf26abff9f3988ce5f3ef419564b062 | /ReferentialGym/modules/batch_reshape_repeat_module.py | a15d557a40e4740ab5170269046558e9154f9e4d | [
"MIT"
] | permissive | mk788/ReferentialGym | 3f0703d4def489354278bba642e79d3b5bac5db2 | afe22da2ac20c0d24e93b4dbd1f1ad61374d1a6c | refs/heads/master | 2023-03-16T01:32:21.826448 | 2021-03-03T12:41:42 | 2021-03-03T12:41:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,760 | py | from typing import Dict, List
import torch
import torch.nn as nn
import torch.optim as optim
from .module import Module
def build_BatchReshapeRepeatModule(id:str,
config:Dict[str,object],
input_stream_keys:List[str]) -> Module:
return BatchReshapeRepeatModule(id=id,
config=config,
input_stream_keys=input_stream_keys)
class BatchReshapeRepeatModule(Module):
def __init__(self,
id:str,
config:Dict[str,object],
input_stream_keys:List[str]):
"""
Reshape input streams data while keeping the batch dimension identical.
        :param config: Dict of parameters. Expects:
            - "new_shape": List of None/Tuple/List/torch.Size representing the new shape
              of each input stream, without mentioning the batch dimension.
              If multiple input streams are proposed but only one element in this
              list, then the list is expanded by repeating the last element.
            - "repetition": List of None/Tuple/List/torch.Size representing the repetition
              of each input stream, without mentioning the batch dimension.
              If multiple input streams are proposed but only one element in this
              list, then the list is expanded by repeating the last element.
"""
input_stream_ids = {
f"input_{idx}":ik
for idx, ik in enumerate(input_stream_keys)
}
assert "new_shape" in config,\
"BatchReshapeRepeatModule relies on 'new_shape' list.\n\
Not found in config."
assert "repetition" in config,\
"BatchReshapeRepeatModule relies on 'repetition' list.\n\
Not found in config."
super(BatchReshapeRepeatModule, self).__init__(id=id,
type="BatchReshapeRepeatModule",
config=config,
input_stream_ids=input_stream_ids)
self.new_shape = self.config["new_shape"]
assert isinstance(self.new_shape, list)
self.repetition = self.config["repetition"]
assert isinstance(self.repetition, list)
self.n_input_streams = len(self.input_stream_ids)
while len(self.new_shape) < self.n_input_streams:
self.new_shape.append(self.new_shape[-1])
while len(self.repetition) < self.n_input_streams:
self.repetition.append(self.repetition[-1])
def compute(self, input_streams_dict:Dict[str,object]) -> Dict[str,object] :
"""
Operates on inputs_dict that is made up of referents to the available stream.
Make sure that accesses to its element are non-destructive.
:param input_streams_dict: dict of str and data elements that
follows `self.input_stream_ids`"s keywords and are extracted
from `self.input_stream_keys`-named streams.
:returns:
- outputs_stream_dict:
"""
outputs_stream_dict = {}
for idx, (k, inp) in enumerate(input_streams_dict.items()):
new_shape = self.new_shape[idx]
if new_shape is None:
new_shape = inp.shape[1:]
n_inp = inp.reshape(inp.shape[0], *new_shape)
repeat = self.repetition[idx]
if repeat is not None:
n_inp = n_inp.repeat(1, *repeat)
outputs_stream_dict[f"output_{idx}"] = n_inp
return outputs_stream_dict
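# Minimal usage sketch (hypothetical stream names; assumes the Module base
# class needs no setup beyond what __init__ receives here):
#   module = build_BatchReshapeRepeatModule(
#       id="reshape0",
#       config={"new_shape": [(4, 8)], "repetition": [None]},
#       input_stream_keys=["modules:encoder:output"])
#   out = module.compute({"input_0": torch.zeros(2, 32)})
#   out["output_0"].shape  # -> torch.Size([2, 4, 8])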
| [
"[email protected]"
] | |
957af4ae3ebfa8a436ddd99b94dbc339d8387feb | c99427245fdb9cb3f1d9f788963759664885b6ec | /leet390.py | c99f7a2a8947a85ff5ce8cd45f7c710cb89f3da7 | [] | no_license | shach934/leetcode | 0a64e851be1419f19da8d2d09294f19758b355b8 | 59de9ba6620c64efbd2cc0aab8c22a82b2c0df21 | refs/heads/master | 2023-01-12T01:48:31.877822 | 2020-11-16T20:48:37 | 2020-11-16T20:48:37 | 306,147,305 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,385 | py | """
390. Elimination Game

There is a list of sorted integers from 1 to n. Starting from left to right, remove the first number and every other number afterward until you reach the end of the list.
Repeat the previous step again, but this time from right to left, remove the rightmost number and every other number from the remaining numbers.
We keep repeating the steps again, alternating left to right and right to left, until a single number remains.
Find the last number that remains starting with a list of length n.

Example:
Input:
n = 9,
1 2 3 4 5 6 7 8 9
2 4 6 8
2 6
6

Output:
6
"""
class Solution(object):
def lastRemaining(self, n):
"""
:type n: int
:rtype: int
"""
head, tail, length, step, forward = 0, n-1, n, 2, True
while length > 1:
if forward:
head = head + step // 2
#print([i+1 for i in range(head, n, step)])
length //= 2
tail = head + step * (length - 1)
step *= 2
forward = False
else:
tail = tail - step//2
length //= 2
head = tail - step * (length - 1)
#print([i for i in range(head, tail+1, step)])
step *= 2
forward = True
return head + 1 | [
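if __name__ == '__main__':
    # quick sanity check against the worked example above (added for illustration)
    print(Solution().lastRemaining(9))  # expected: 6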
"[email protected]"
] | |
d01231c94199e4348df604a2592dd5e6dfabe359 | eddb777e95b6c6baa413fd049f959ab593de5041 | /src/main/python/bedk/simpleEventExample.py | 596b022b83c7b74cbf21aba5cb4fbb85ca16b057 | [] | no_license | jdgwartney/sandbox | bfb641b448b2ffb0e0446cdd942165c7501abd8e | 62f7a3a034a73de938c9f241638ff1170373e932 | refs/heads/master | 2020-05-16T23:35:38.215681 | 2014-03-31T14:16:45 | 2014-03-31T14:16:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | #!/usr/bin/env python3
from boundary.api.event import createEvent
from boundary.api.event import getEvent
myApiHost = 'api.boundary.com'
myOrganizationID = '3ehRi7uZeeaTN12dErF5XOnRXjC'
myApiKey = 'ARI0PzUzWYUo7GG1OxiHmABTpr9'
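# fingerprintFields tells the Boundary API which fields identify duplicate
# events; '@title' means events sharing the same title are de-duplicated.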
myFingerprintFields = '@title'
myTitle = 'My shiny new event'
#
# Create a new boundary Event
#
eventId = createEvent(myApiHost,
                      myApiKey,
                      myFingerprintFields,
                      myTitle, myOrganizationID)
print('event id: ' + eventId)
#
# Fetch the newly created boundary event
#
newEvent = getEvent(myApiHost,
                    myApiKey,
                    eventId)
print(newEvent)
| [
"[email protected]"
] | |
6895b4868d46ed8ceb2a03fcedf4563bbb31f208 | b9f4e78555a2644c56fd6cd1ac82c5b729875d70 | /atomic_utils.py | 0185009c1bda6c267a8d78bbd7ad7a75f4bd5877 | [] | no_license | taiyan33/elegant-concurrency-lab | 7be31a6f6c49c33f25e6cc02c1a19d0a1dc71a8b | 781800eaf3149643178e0df3a41f6b9fcf0e6cec | refs/heads/master | 2020-04-21T07:59:46.085819 | 2017-06-11T05:17:06 | 2017-06-11T05:17:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,757 | py | #!/usr/bin/env python
import requests
from bs4 import BeautifulSoup
PYCON_TW_ROOT_URL = 'https://tw.pycon.org/'
# conform accessing only its frame
def query_text(url):
return requests.get(url).text
def parse_out_href_gen(text):
# soup is bs4.BeautifulSoup
# a_tag is bs4.element.Tag
soup = BeautifulSoup(text, 'html.parser')
return (a_tag.get('href', '') for a_tag in soup.find_all('a'))
def is_relative_href(url):
return not url.startswith('http') and not url.startswith('mailto:')
# conform using atomic operators
url_visted_map = {}
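# check-then-mark helper: returns True if `url` was seen before, otherwise
# marks it as visited and returns False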
def is_visited_or_mark(url):
visited = url_visted_map.get(url, False)
if not visited:
url_visted_map[url] = True
return visited
if __name__ == '__main__':
# test cases
print('Testing query_text ... ', end='')
text = query_text('https://tw.pycon.org')
print(repr(text[:40]))
print('Testing parse_out_href_gen ... ', end='')
href_gen = parse_out_href_gen(text)
print(repr(list(href_gen)[:3]))
print('Testing is_relative_href ...')
assert is_relative_href('2017/en-us')
assert is_relative_href('/2017/en-us')
assert not is_relative_href('https://www.facebook.com/pycontw')
assert not is_relative_href('mailto:[email protected]')
print('Testing is_visited_or_mark ...')
assert not is_visited_or_mark('/')
assert is_visited_or_mark('/')
# benchmark
from time import time
print('Benchmarking query_text ... ', end='') # 40x
s = time()
text = query_text('https://tw.pycon.org')
e = time()
print(f'{e-s:.4}s')
print('Benchmarking parse_out_href_gen ... ', end='') # 1x
s = time()
list(parse_out_href_gen(text))
e = time()
print(f'{e-s:.4}s')
| [
"[email protected]"
] | |
7add687cf561df1746688e09b316cec08365024c | 5c84ae48b34b5e2bd67c5301cd81f0de9da7decd | /core/__init__.py | 9cb75c21bccc21b70930c052672ca9546c1d30bf | [] | no_license | makesaturdays/saturdays.class | 00a575620d7e162f7f4de981209265396676683d | 031bda06aced31cd13c87f25928ff93e25be826f | refs/heads/master | 2021-01-12T10:12:23.570143 | 2017-01-12T20:05:52 | 2017-01-12T20:05:52 | 76,386,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py |
from flask import Flask
import os
import sys
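# when frozen (i.e. bundled, e.g. with py2app), resolve the app path relative
# to the executable instead of this source file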
if getattr(sys, 'frozen', False):
app_path = os.path.abspath(os.path.dirname(sys.executable))
app_path = app_path.replace('/server.app/Contents/MacOS', '')
elif __file__:
app_path = os.path.abspath(os.path.dirname(__file__))+'/..'
app = Flask(__name__, static_folder=app_path+'/files', template_folder=app_path+'/layouts')
app.path = app_path
app.config.from_pyfile(app.path+'/config/environment.py')
from core.pages import * | [
"[email protected]"
] | |
3356fa4113a988b211f50570de2a247ca18fa6d1 | 98bda873a0235ed64ab37cc74bfaa7ead473100b | /89s.py | 631c39eef8b2a80ec9628768d7f5a8f303ec363b | [] | no_license | soundarya-1609/set5 | d402894733ccef0efb86b6d5c350adfb0012f5bc | 3830b336a96086a5f5d0efb8cb320bbc209f1fe3 | refs/heads/master | 2020-06-14T15:21:05.121760 | 2019-07-08T08:12:15 | 2019-07-08T08:12:15 | 195,039,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61 | py | puts=input()
gets = sorted(puts)  # sorted() on a string yields its characters in ascending order
print(''.join(gets))
| [
"[email protected]"
] | |
960ca7e4a5e66160874bd0b6cb357b1e3ede8b7e | f5d7660f0f2c32e79bb4d65f0f221147c9d3646b | /src/spring/azext_spring/vendored_sdks/appplatform/v2023_03_01_preview/aio/operations/_customized_accelerators_operations.py | 8ae15f6ab0bfb2ec9ed31a02ee8ec43649dc9855 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | markrzasa/azure-cli-extensions | d6a09d18f818a7795dcfea49fefe3d717d85557a | 7ea11998bb4a5f079b090e0fa637bd7b87dc9b7e | refs/heads/main | 2023-08-16T18:23:39.143846 | 2023-08-04T06:08:01 | 2023-08-04T06:08:01 | 398,926,578 | 0 | 0 | MIT | 2021-08-23T00:04:04 | 2021-08-23T00:04:03 | null | UTF-8 | Python | false | false | 38,624 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._customized_accelerators_operations import (
build_create_or_update_request,
build_delete_request,
build_get_request,
build_list_request,
build_validate_request,
)
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class CustomizedAcceleratorsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.appplatform.v2023_03_01_preview.aio.AppPlatformManagementClient`'s
:attr:`customized_accelerators` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(
self, resource_group_name: str, service_name: str, application_accelerator_name: str, **kwargs: Any
) -> AsyncIterable["_models.CustomizedAcceleratorResource"]:
"""Handle requests to list all customized accelerators.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param service_name: The name of the Service resource. Required.
:type service_name: str
:param application_accelerator_name: The name of the application accelerator. Required.
:type application_accelerator_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CustomizedAcceleratorResource or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.appplatform.v2023_03_01_preview.models.CustomizedAcceleratorResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-01-preview"))
cls: ClsType[_models.CustomizedAcceleratorResourceCollection] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
service_name=service_name,
application_accelerator_name=application_accelerator_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("CustomizedAcceleratorResourceCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/applicationAccelerators/{applicationAcceleratorName}/customizedAccelerators"
}
@distributed_trace_async
async def get(
self,
resource_group_name: str,
service_name: str,
application_accelerator_name: str,
customized_accelerator_name: str,
**kwargs: Any
) -> _models.CustomizedAcceleratorResource:
"""Get the customized accelerator.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param service_name: The name of the Service resource. Required.
:type service_name: str
:param application_accelerator_name: The name of the application accelerator. Required.
:type application_accelerator_name: str
:param customized_accelerator_name: The name of the customized accelerator. Required.
:type customized_accelerator_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CustomizedAcceleratorResource or the result of cls(response)
:rtype: ~azure.mgmt.appplatform.v2023_03_01_preview.models.CustomizedAcceleratorResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-01-preview"))
cls: ClsType[_models.CustomizedAcceleratorResource] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
service_name=service_name,
application_accelerator_name=application_accelerator_name,
customized_accelerator_name=customized_accelerator_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("CustomizedAcceleratorResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/applicationAccelerators/{applicationAcceleratorName}/customizedAccelerators/{customizedAcceleratorName}"
}
async def _create_or_update_initial(
self,
resource_group_name: str,
service_name: str,
application_accelerator_name: str,
customized_accelerator_name: str,
customized_accelerator_resource: Union[_models.CustomizedAcceleratorResource, IO],
**kwargs: Any
) -> _models.CustomizedAcceleratorResource:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.CustomizedAcceleratorResource] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(customized_accelerator_resource, (IO, bytes)):
_content = customized_accelerator_resource
else:
_json = self._serialize.body(customized_accelerator_resource, "CustomizedAcceleratorResource")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
service_name=service_name,
application_accelerator_name=application_accelerator_name,
customized_accelerator_name=customized_accelerator_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("CustomizedAcceleratorResource", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("CustomizedAcceleratorResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {}) # type: ignore
return deserialized # type: ignore
_create_or_update_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/applicationAccelerators/{applicationAcceleratorName}/customizedAccelerators/{customizedAcceleratorName}"
}
@overload
async def begin_create_or_update(
self,
resource_group_name: str,
service_name: str,
application_accelerator_name: str,
customized_accelerator_name: str,
customized_accelerator_resource: _models.CustomizedAcceleratorResource,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.CustomizedAcceleratorResource]:
"""Create or update the customized accelerator.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param service_name: The name of the Service resource. Required.
:type service_name: str
:param application_accelerator_name: The name of the application accelerator. Required.
:type application_accelerator_name: str
:param customized_accelerator_name: The name of the customized accelerator. Required.
:type customized_accelerator_name: str
:param customized_accelerator_resource: The customized accelerator for the create or update
operation. Required.
:type customized_accelerator_resource:
~azure.mgmt.appplatform.v2023_03_01_preview.models.CustomizedAcceleratorResource
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either CustomizedAcceleratorResource or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.appplatform.v2023_03_01_preview.models.CustomizedAcceleratorResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_create_or_update(
self,
resource_group_name: str,
service_name: str,
application_accelerator_name: str,
customized_accelerator_name: str,
customized_accelerator_resource: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.CustomizedAcceleratorResource]:
"""Create or update the customized accelerator.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param service_name: The name of the Service resource. Required.
:type service_name: str
:param application_accelerator_name: The name of the application accelerator. Required.
:type application_accelerator_name: str
:param customized_accelerator_name: The name of the customized accelerator. Required.
:type customized_accelerator_name: str
:param customized_accelerator_resource: The customized accelerator for the create or update
operation. Required.
:type customized_accelerator_resource: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either CustomizedAcceleratorResource or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.appplatform.v2023_03_01_preview.models.CustomizedAcceleratorResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
service_name: str,
application_accelerator_name: str,
customized_accelerator_name: str,
customized_accelerator_resource: Union[_models.CustomizedAcceleratorResource, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.CustomizedAcceleratorResource]:
"""Create or update the customized accelerator.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param service_name: The name of the Service resource. Required.
:type service_name: str
:param application_accelerator_name: The name of the application accelerator. Required.
:type application_accelerator_name: str
:param customized_accelerator_name: The name of the customized accelerator. Required.
:type customized_accelerator_name: str
:param customized_accelerator_resource: The customized accelerator for the create or update
operation. Is either a CustomizedAcceleratorResource type or a IO type. Required.
:type customized_accelerator_resource:
~azure.mgmt.appplatform.v2023_03_01_preview.models.CustomizedAcceleratorResource or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either CustomizedAcceleratorResource or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.appplatform.v2023_03_01_preview.models.CustomizedAcceleratorResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.CustomizedAcceleratorResource] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
service_name=service_name,
application_accelerator_name=application_accelerator_name,
customized_accelerator_name=customized_accelerator_name,
customized_accelerator_resource=customized_accelerator_resource,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("CustomizedAcceleratorResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_create_or_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/applicationAccelerators/{applicationAcceleratorName}/customizedAccelerators/{customizedAcceleratorName}"
}
async def _delete_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
service_name: str,
application_accelerator_name: str,
customized_accelerator_name: str,
**kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-01-preview"))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_request(
resource_group_name=resource_group_name,
service_name=service_name,
application_accelerator_name=application_accelerator_name,
customized_accelerator_name=customized_accelerator_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/applicationAccelerators/{applicationAcceleratorName}/customizedAccelerators/{customizedAcceleratorName}"
}
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
service_name: str,
application_accelerator_name: str,
customized_accelerator_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Delete the customized accelerator.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param service_name: The name of the Service resource. Required.
:type service_name: str
:param application_accelerator_name: The name of the application accelerator. Required.
:type application_accelerator_name: str
:param customized_accelerator_name: The name of the customized accelerator. Required.
:type customized_accelerator_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-01-preview"))
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
service_name=service_name,
application_accelerator_name=application_accelerator_name,
customized_accelerator_name=customized_accelerator_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_delete.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/applicationAccelerators/{applicationAcceleratorName}/customizedAccelerators/{customizedAcceleratorName}"
}
@overload
async def validate(
self,
resource_group_name: str,
service_name: str,
application_accelerator_name: str,
customized_accelerator_name: str,
properties: _models.CustomizedAcceleratorProperties,
*,
content_type: str = "application/json",
**kwargs: Any
) -> Optional[_models.CustomizedAcceleratorValidateResult]:
"""Check the customized accelerator are valid.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param service_name: The name of the Service resource. Required.
:type service_name: str
:param application_accelerator_name: The name of the application accelerator. Required.
:type application_accelerator_name: str
:param customized_accelerator_name: The name of the customized accelerator. Required.
:type customized_accelerator_name: str
:param properties: Customized accelerator properties to be validated. Required.
:type properties:
~azure.mgmt.appplatform.v2023_03_01_preview.models.CustomizedAcceleratorProperties
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CustomizedAcceleratorValidateResult or None or the result of cls(response)
:rtype: ~azure.mgmt.appplatform.v2023_03_01_preview.models.CustomizedAcceleratorValidateResult
or None
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def validate(
self,
resource_group_name: str,
service_name: str,
application_accelerator_name: str,
customized_accelerator_name: str,
properties: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> Optional[_models.CustomizedAcceleratorValidateResult]:
"""Check the customized accelerator are valid.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param service_name: The name of the Service resource. Required.
:type service_name: str
:param application_accelerator_name: The name of the application accelerator. Required.
:type application_accelerator_name: str
:param customized_accelerator_name: The name of the customized accelerator. Required.
:type customized_accelerator_name: str
:param properties: Customized accelerator properties to be validated. Required.
:type properties: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CustomizedAcceleratorValidateResult or None or the result of cls(response)
:rtype: ~azure.mgmt.appplatform.v2023_03_01_preview.models.CustomizedAcceleratorValidateResult
or None
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def validate(
self,
resource_group_name: str,
service_name: str,
application_accelerator_name: str,
customized_accelerator_name: str,
properties: Union[_models.CustomizedAcceleratorProperties, IO],
**kwargs: Any
) -> Optional[_models.CustomizedAcceleratorValidateResult]:
"""Check the customized accelerator are valid.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param service_name: The name of the Service resource. Required.
:type service_name: str
:param application_accelerator_name: The name of the application accelerator. Required.
:type application_accelerator_name: str
:param customized_accelerator_name: The name of the customized accelerator. Required.
:type customized_accelerator_name: str
:param properties: Customized accelerator properties to be validated. Is either a
CustomizedAcceleratorProperties type or a IO type. Required.
:type properties:
~azure.mgmt.appplatform.v2023_03_01_preview.models.CustomizedAcceleratorProperties or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CustomizedAcceleratorValidateResult or None or the result of cls(response)
:rtype: ~azure.mgmt.appplatform.v2023_03_01_preview.models.CustomizedAcceleratorValidateResult
or None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-03-01-preview"))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[Optional[_models.CustomizedAcceleratorValidateResult]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(properties, (IO, bytes)):
_content = properties
else:
_json = self._serialize.body(properties, "CustomizedAcceleratorProperties")
request = build_validate_request(
resource_group_name=resource_group_name,
service_name=service_name,
application_accelerator_name=application_accelerator_name,
customized_accelerator_name=customized_accelerator_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.validate.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("CustomizedAcceleratorValidateResult", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
validate.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/applicationAccelerators/{applicationAcceleratorName}/customizedAccelerators/{customizedAcceleratorName}/validate"
}
| [
"[email protected]"
] | |
d7adeb2560642c4a30f71925b7ad592a459f774d | 2eccd1b25468e278ba6568063901dfa6608c0271 | /assignment5/reducer.py | a2bfeaafa6f2cdc3970fae62fa71a7c5926b98a4 | [
"MIT"
] | permissive | IITDU-BSSE06/ads-demystifying-the-logs-Toufiqur0636 | 089aa00e8dd1c858e01b0945ac24ecc886044f96 | 4b01a8721b7fc869737572de1bdc155f86397e9b | refs/heads/master | 2021-07-26T00:51:26.576645 | 2017-11-08T06:58:52 | 2017-11-08T06:58:52 | 109,025,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | #!/usr/bin/python
import sys
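# tally how often each path appears on stdin; the reducer's output is the
# number of distinct paths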
path_map = dict()
for line in sys.stdin:
path = line.strip()
path_map[path] = path_map.get(path, 0) + 1
print len(path_map)
| [
"[email protected]"
] | |
a8aa733cd8ee2d48a463b17928cdb8c8623b6a9f | 6c285510932df0477ae33752d0f7c3b153a462c7 | /examples/pygmsh/screw.py | 5143666808bc7f6013b3294f50bc69d90c78fa8a | [
"MIT"
] | permissive | kebitmatf/meshzoo | 4b24a223daadb9c057abb2b083cf401c74220072 | 6c7ac99af2c7c0673d69b85bd3330ee9a9b67f90 | refs/heads/master | 2020-12-02T17:46:29.592835 | 2017-04-06T10:17:20 | 2017-04-06T10:17:20 | 96,425,485 | 1 | 0 | null | 2017-07-06T12:00:25 | 2017-07-06T12:00:25 | null | UTF-8 | Python | false | false | 869 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import pygmsh as pg
import numpy as np
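# Note: this targets the legacy pygmsh API (pg.Geometry(), pg.generate_mesh);
# recent pygmsh releases expose a different, context-manager based interface.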
def create_screw_mesh():
geom = pg.Geometry()
# Draw a cross.
poly = geom.add_polygon([
[0.0, 0.5, 0.0],
[-0.1, 0.1, 0.0],
[-0.5, 0.0, 0.0],
[-0.1, -0.1, 0.0],
[0.0, -0.5, 0.0],
[0.1, -0.1, 0.0],
[0.5, 0.0, 0.0],
[0.1, 0.1, 0.0]
],
lcar=0.05
)
axis = [0, 0, 1]
geom.extrude(
'Surface{%s}' % poly,
translation_axis=axis,
rotation_axis=axis,
point_on_axis=[0, 0, 0],
angle=2.0 / 6.0 * np.pi
)
points, cells = pg.generate_mesh(geom)
return points, cells['tetra']
if __name__ == '__main__':
import meshio
points, cells = create_screw_mesh()
meshio.write('screw.e', points, {'tetra': cells})
| [
"[email protected]"
] | |
93b81ba56da6268cbd01cb00ed76b8d353f62f83 | 94460fe9a2df490b1763f60b25b26bce5d3d5940 | /EyePatterns/clustering_algorithms/affinity_propagation.py | b987e9dbb65634f5c650b49c7269067f11f622a3 | [
"MIT"
] | permissive | Sale1996/Pattern-detection-of-eye-tracking-scanpaths | 1711262280dde728e1b2559e9ca9f9f66d3d514f | 15c832f26dce98bb95445f9f39f454f99bbb6029 | refs/heads/master | 2022-12-09T03:01:12.260438 | 2020-08-27T11:38:42 | 2020-08-27T11:38:42 | 290,264,718 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 989 | py | # from work L Frey, Brendan J., and Delbert Dueck. "Clustering by passing messages between data points." science 315.5814 (2007): 972-976
# https://science.sciencemag.org/content/315/5814/972
import numpy as np
import sklearn.cluster
class AffinityPropagation:
def __init__(self, affinity='precomputed', damping=0.5):
self.affinity = affinity
self.damping = damping
def fit(self, similarity_scores):
self.aff_prop = sklearn.cluster.AffinityPropagation(affinity=self.affinity, damping=self.damping)
self.aff_prop.fit(similarity_scores)
def get_exemplars_and_their_features(self, data):
exemplar_features_map = {}
for cluster_id in np.unique(self.aff_prop.labels_):
exemplar = data[self.aff_prop.cluster_centers_indices_[cluster_id]]
cluster = np.unique(data[np.nonzero(self.aff_prop.labels_ == cluster_id)])
exemplar_features_map[exemplar] = cluster
return exemplar_features_map
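# Usage sketch (hypothetical data; with affinity='precomputed', fit() expects
# a square pairwise similarity matrix, e.g. negated scanpath distances):
#   sims = np.array([[ 0., -2., -5.],
#                    [-2.,  0., -4.],
#                    [-5., -4.,  0.]])
#   ap = AffinityPropagation()
#   ap.fit(sims)
#   ap.get_exemplars_and_their_features(np.array(['abc', 'abd', 'xyz']))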
| [
"[email protected]"
] | |
347fedbdef33c7456662ebee31abe968b83b35a9 | 505343f6ace00d22f8753c1a943a5794a619e698 | /katas/Python/7 kyu/Last 541629460b198da04e000bb9.py | e5417442445b5919eba81b62a2941374fb315ad6 | [] | no_license | bullet1337/codewars | 7652e50bf768bc47976a9124dd98b93602d4d458 | ba7f13ddd766158b41e036dae5d6b15f7f08761a | refs/heads/master | 2020-03-27T05:04:03.751302 | 2019-04-30T17:45:39 | 2019-04-30T17:45:39 | 145,991,995 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | # https://www.codewars.com/kata/541629460b198da04e000bb9
def last(*args):
return args[-1][-1] if isinstance(args[-1], (list, str)) else args[-1] | [
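# e.g. last(1, 2, 3) -> 3, last([1, 2, 3]) -> 3, last('xyz') -> 'z', last(1, [2, 3]) -> 3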
"[email protected]"
] | |
6483bf1d9e0ab9d094eeb3e3abb6169b01a6ab06 | 74649c1220c68ad0af79e420d572e3769fcd7a53 | /mlprodict/onnxrt/ops_cpu/op_negative_log_likelihood_loss.py | aca354dbbd58eebedc07f69186c63df0d5149f30 | [
"MIT"
] | permissive | sdpython/mlprodict | e62edcb428700cb2c4527e54e96431c1d2b36118 | 27d6da4ecdd76e18292f265fde61d19b66937a5c | refs/heads/master | 2023-05-08T10:44:30.418658 | 2023-03-08T22:48:56 | 2023-03-08T22:48:56 | 112,469,804 | 60 | 13 | MIT | 2023-04-19T01:21:38 | 2017-11-29T11:57:10 | Python | UTF-8 | Python | false | false | 3,472 | py | """
@file
@brief Runtime operator.
"""
import numpy
from ._op import OpRun
def _compute_negative_log_likelihood_loss(x, target, weight=None,
reduction=b'mean', ignore_index=None):
"""
Modified version of `softmaxcrossentropy.py
<https://github.com/onnx/onnx/blob/main/onnx/backend/
    test/case/node/negativeloglikelihoodloss.py>`_ to handle types other
    than float32.
"""
input_shape = x.shape
if len(input_shape) == 1:
raise RuntimeError(f"Unsupported shape {input_shape!r}.")
target_shape = target.shape
N = input_shape[0]
C = input_shape[1]
# initialize the positional weights when required
gather_weight = None
if weight is not None:
# setting mode='clip' to deal with ignore_index > C or < 0 cases.
# when the target value is > C or < 0, it doesn't matter which value we are
# taking in gather_weight, since it will be set to 0 in the following if-block
# use numpy.int32 to make it compatible with x86 machines
gather_weight = numpy.take(weight, numpy.array(
target, dtype=numpy.int32), mode='clip')
# set `ignore_index`'s loss weight to 0.
# The loss tensor will be multiplied by this weight tensor,
        # so `ignore_index`'s loss value will be eliminated.
if ignore_index is not None:
gather_weight = numpy.where(
target == ignore_index, 0, gather_weight).astype(dtype=x.dtype)
elif ignore_index != -1:
gather_weight = numpy.where(
target == ignore_index, 0, 1).astype(dtype=x.dtype)
# if input is 4-d and above, make it 3-d
if len(input_shape) != 3:
x = x.reshape((N, C, -1))
target = target.reshape((N, -1))
# Get a dimension from the reshaped input.
# If the original input shape is [N, C, H, W],
# the D here should be H * W because we reshape
# [N, C, H, W] to [N, C, H * W].
D = x.shape[2]
neg_gather_element_input = numpy.zeros((N, D), dtype=x.dtype)
for i in range(N):
for d in range(D):
if target[i][d] != ignore_index:
neg_gather_element_input[i][d] = -x[i][target[i][d]][d]
loss = neg_gather_element_input
# if the input was 4-d or above reshape to the right shape
if len(input_shape) != 3:
loss = loss.reshape(target_shape)
# apply the weights when required
if gather_weight is not None:
loss = gather_weight * loss
if reduction == b'mean':
loss = loss.sum() / gather_weight.sum()
return (loss, )
if reduction == b'mean':
loss = numpy.mean(loss)
elif reduction == b'sum':
loss = numpy.sum(loss)
return (loss, )
class NegativeLogLikelihoodLoss(OpRun):
"""
Python runtime for function *NegativeLogLikelihoodLoss*.
"""
atts = {'reduction': b'mean', 'ignore_index': -1}
def __init__(self, onnx_node, desc=None, **options):
OpRun.__init__(self, onnx_node, desc=desc,
expected_attributes=NegativeLogLikelihoodLoss.atts,
**options)
def _run(self, x, target, weight=None, attributes=None, verbose=0, fLOG=None): # pylint: disable=W0221
return _compute_negative_log_likelihood_loss(
x, target, weight=weight, reduction=self.reduction, # pylint: disable=E1101
ignore_index=self.ignore_index) # pylint: disable=E1101
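# Minimal sketch (illustrative comment, not part of the original module);
# the arrays below are assumptions with shapes (N=2, C=2, D=1):
#   x = numpy.array([[[1.0], [0.0]], [[0.0], [1.0]]], dtype=numpy.float32)
#   target = numpy.array([[0], [1]], dtype=numpy.int64)
#   _compute_negative_log_likelihood_loss(x, target, reduction=b'mean')  # -> (-1.0,)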
| [
"[email protected]"
] | |
783c38fd2518fc737458e6c949ea4c94fda25305 | a598604583977761b57ba88dd34455d7127876ec | /frb/scripts/image.py | 6e189c8c6588927cfbe005667e43d85e94a7c53c | [
"BSD-3-Clause"
] | permissive | FRBs/FRB | b973cd3d72489fe1f8e7957d6f29f5c657faf4a3 | ef68b9a9fbb6d042b7a8c6c8818d55668d7e8639 | refs/heads/main | 2023-08-04T15:00:52.591942 | 2023-08-02T13:36:44 | 2023-08-02T13:36:44 | 85,193,203 | 52 | 26 | BSD-3-Clause | 2023-08-02T13:36:46 | 2017-03-16T12:26:19 | Jupyter Notebook | UTF-8 | Python | false | false | 2,077 | py | #!/usr/bin/env python
"""
Script generate an image of an FRB
"""
from __future__ import (print_function, absolute_import, division, unicode_literals)
from IPython import embed
def parser(options=None):
import argparse
# Parse
parser = argparse.ArgumentParser(description='Script to make a quick image figure [v1.0]')
parser.add_argument("fits_file", type=str, help="Image FITS file with WCS")
parser.add_argument("frb_coord", type=str, help="FRB Coordinates, e.g. J081240.7+320809 or 122.223,-23.2322 or 07:45:00.47,34:17:31.1 or FRB name (FRB180924)")
parser.add_argument("--imsize", default=30., type=float, help="Image size in arcsec")
parser.add_argument("--vmnx", type=str, help="Image scale: vmin,vmax")
parser.add_argument("--outfile", default='image.png', type=str, help="Output filename")
if options is None:
pargs = parser.parse_args()
else:
pargs = parser.parse_args(options)
return pargs
def main(pargs):
""" Run
"""
import warnings
import numpy as np
from matplotlib import pyplot as plt
from astropy.io import fits
from astropy import units
from frb import frb
from frb.figures import galaxies as ffgal
from frb.figures import utils as ffutils
from linetools.scripts.utils import coord_arg_to_coord
# Load up
hdu = fits.open(pargs.fits_file)
icoord = coord_arg_to_coord(pargs.frb_coord)
# Parse
if pargs.vmnx is not None:
tstr = pargs.vmnx.split(',')
vmnx = (float(tstr[0]), float(tstr[1]))
else:
vmnx = (None,None)
# Dummy FRB object
FRB = frb.FRB('TMP', icoord, 0.)
FRB.set_ee(1.0, 1.0, 0., 95.)
fig = plt.figure(figsize=(7, 7))
ffutils.set_mplrc()
ffgal.sub_image(fig, hdu, FRB, vmnx=vmnx, cmap='gist_heat',
frb_clr='white', imsize=pargs.imsize) #img_center=HG190608.coord,
# Layout and save
plt.tight_layout(pad=0.2, h_pad=0.1, w_pad=0.1)
plt.savefig(pargs.outfile, dpi=300)
plt.close()
print('Wrote {:s}'.format(pargs.outfile))
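# Example invocation (sketch; the FITS file and coordinates are placeholders):
#   python image.py cutout.fits FRB180924 --imsize 20 --vmnx 0,100 --outfile image.png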
| [
"[email protected]"
] | |
55e6c8bae8bfe8625f52f2a79caa71ab90ae8587 | aa0f29e5b6832ab36f440f964b403712b2312fa8 | /lab/1/lab1.py | 2df7edbd876b74984911220aa1ab34efcadc1885 | [] | no_license | zweed4u/Embedded-Systems-Design-II | d8d3f608a77ba6919ca2cb692832a60400498265 | c525d481f51c7b6d6fdbbd111b0211c6a24d2af1 | refs/heads/master | 2021-05-11T16:02:42.067488 | 2018-04-20T18:49:34 | 2018-04-20T18:49:34 | 117,721,976 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,533 | py | #!/usr/bin/python
"""
Zachary Weeden
CPET-563 Lab 1
January 23, 2018
"""
import os
import sys
import math
from PyQt4 import QtGui, QtCore
hw_flag = 0 # 0 = read from file
class Encoders:
def __init__(self, hw_flag):
"""Constructor for Encoder class"""
self.hw_flag = hw_flag
self.contents = None
self.number_of_instructions = 0
def get_encoders(self):
"""
Opens the encoders text file and stores it into class attribute
Assumes that the encoders txt file is named appropriately and in the same directory as invocation
:return: str - the contents of the encoders.txt file
"""
with open(os.getcwd() + '/encoders.txt', 'r') as f:
self.contents = f.read()
return self.contents
def parse_file(self):
"""
Parse the actual encoder text into usable data structure
:return: dict - {'header_from_1st_row': [array_of_values_for_given_column]}
"""
# Read from file
if self.hw_flag == 0:
if self.contents is None:
self.get_encoders()
# assumes format of l_dir\tleft\tr_dir\tright as the first row - data starts 2nd row
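            # e.g. encoders.txt contents (tab-separated single digits):
            # l_dir\tleft\tr_dir\tright
            # 0\t1\t0\t1
            # 1\t1\t0\t0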
headers = self.contents.split('\n')[0].split()
control_bits = self.contents.split('\n')[1:]
l_dir = []
left = []
r_dir = []
right = []
encoder_map = {}
for row in control_bits: # ensure that the line from the file was not empty
if row.strip():
l_dir.append(int(row[0]))
left.append(int(row[2]))
r_dir.append(int(row[4]))
right.append(int(row[6]))
self.number_of_instructions += 1
encoder_map[headers[0]] = l_dir
encoder_map[headers[1]] = left
encoder_map[headers[2]] = r_dir
encoder_map[headers[3]] = right
return encoder_map
elif self.hw_flag == 1:
return 0
class Board(QtGui.QGraphicsView):
def __init__(self, parent):
super(Board, self).__init__()
# Timer update to be between 2 and 4 seconds
self.timer_init = 2000.0
self.parent = parent
self.scene = QtGui.QGraphicsScene(self)
self.setScene(self.scene)
# Should be able to dynamically grab board dimensions based on machine
self.board_width = 50
self.board_height = 50
# effectively sets the logical scene coordinates from 0,0 to 1000,1000
self.scene.addRect(0, 0, self.board_width, self.board_height)
self.rover = Rover(self, self.board_width, self.board_height)
self.scene.addItem(self.rover)
self.timer = QtCore.QBasicTimer()
self.rover.setPos(25, 25)
def startGame(self):
self.status = 0
self.rover.setPos(25, 25)
self.timer.start(self.timer_init, self)
def timerEvent(self, event):
if self.status == 0:
self.status = self.rover.basic_move()
else:
self.timer.stop()
def resizeEvent(self, event):
super(Board, self).resizeEvent(event)
self.fitInView(self.scene.sceneRect(), QtCore.Qt.KeepAspectRatio)
class LabOne(QtGui.QMainWindow):
def __init__(self, parsed_encoders):
super(LabOne, self).__init__()
self.encoders = parsed_encoders
self.statusBar().showMessage('CPET-563 Lab 1 :: Zachary Weeden 2018')
exitAction = QtGui.QAction(QtGui.QIcon('exit.png'), '&Exit', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.setStatusTip('Exit application')
exitAction.triggered.connect(QtGui.qApp.quit)
menubar = self.menuBar()
fileMenu = menubar.addMenu('&File')
fileMenu.addAction(exitAction)
self.hLayout = QtGui.QHBoxLayout()
self.dockFrame = QtGui.QFrame()
self.dockFrame.setLayout(self.hLayout)
self.dock = QtGui.QDockWidget(self)
self.dock.setWidget(self.dockFrame)
self.addDockWidget(QtCore.Qt.DockWidgetArea(4), self.dock)
self.setFocusPolicy(QtCore.Qt.StrongFocus)
self.board = Board(self)
self.vLayout = QtGui.QVBoxLayout()
self.vLayout.addWidget(self.board)
self.frame = QtGui.QFrame(self)
self.frame.setLayout(self.vLayout)
self.setCentralWidget(self.frame)
self.setWindowTitle("Lab 1")
self.showMaximized()
self.show()
class Rover(QtGui.QGraphicsItem):
def __init__(self, parent, board_width, board_height):
super(Rover, self).__init__()
self.instruction_step = 0
self.angle = 0
self.color = QtGui.QColor(0, 0, 255)
self.rover_width = 8
self.rover_height = 6
self.board_width = board_width
self.board_height = board_height
self.parent = parent
def boundingRect(self):
"""
Bounds of the rover object
:return: QtCore.QRectF object
"""
return QtCore.QRectF(-self.rover_width / 2, -self.rover_height / 2,
self.rover_width, self.rover_height)
def set_color(self, color_tuple):
"""
Set the color of the rover rectangle
:param color_tuple: tuple of RGB vals
:return:
"""
self.color = QtGui.QColor(color_tuple[0], color_tuple[1],
color_tuple[2])
def paint(self, painter, option, widget):
"""
Draw the rover object rectangle
:param painter:
:param option:
:param widget:
:return:
"""
painter.drawPixmap(self.boundingRect(), QtGui.QPixmap("rover.svg"),
QtCore.QRectF(0.0, 0.0, 640.0, 480.0))
def basic_move(self):
"""
Determines the next coordinates based of encoder arrays
:return: 0
"""
left_encoder = self.parent.parent.encoders['left']
right_encoder = self.parent.parent.encoders['right']
if self.instruction_step < len(
left_encoder) or self.instruction_step < len(right_encoder):
left_ticks, right_ticks = left_encoder[self.instruction_step], \
right_encoder[self.instruction_step]
print "Left ticks: {} Right ticks: {}".format(left_ticks,
right_ticks)
if left_ticks != right_ticks:
# Different values for each encoder - parse
# I'm so sorry - this is awful
print "Rotating"
angle = (left_ticks - right_ticks) * 45
self.angle += angle
self.rotate(angle)
else:
forward_x = max(left_ticks, right_ticks) * math.cos(
self.angle * (math.pi / 180))
forward_y = -1 * (max(left_ticks, right_ticks) * math.sin(
-1 * self.angle * (math.pi / 180)))
self.setX(self.x() + forward_x)
self.setY(self.y() + forward_y)
self.instruction_step += 1
        else:
            print "Encoder text file fully traversed"
            return 1
        return 0
if __name__ == '__main__':
parsed_encoders = Encoders(hw_flag).parse_file()
app = QtGui.QApplication(sys.argv)
app.setFont(QtGui.QFont("Helvetica", 10))
LabOne(parsed_encoders).board.startGame()
sys.exit(app.exec_())
| [
"[email protected]"
] | |
879fdbd096ab73377632692fa3713686b5f68cc5 | eb87c8b1ce8591d207643d3924b7939228f1a4fe | /conformance_suite/try_except_catch_final_some_exn.py | 5b54eda44dcdf5cc6bfd75757a4335e5e481a7c3 | [] | no_license | brownplt/insta-model | 06543b43dde89913c219d476ced0f51a439add7b | 85e2c794ec4b1befa19ecb85f2c8d2509ec8cf42 | refs/heads/main | 2023-08-30T19:06:58.083150 | 2023-05-03T18:53:58 | 2023-05-10T22:29:18 | 387,500,638 | 5 | 0 | null | 2022-04-23T23:06:52 | 2021-07-19T14:53:09 | Racket | UTF-8 | Python | false | false | 248 | py | # try_except_catch_final_some_exn.py
# This should pass.
# This should terminate.
def f():
try:
raise Exception("foo")
except Exception:
return 2
else:
return 3
finally:
return 42
assert f() is 42
| [
"[email protected]"
] | |
5ffb069ccecb464878b86b09af6fa21444860185 | 55c93a864428a26c50f43f97f6834fd27dd0210f | /ufs/interface/fviews.py | 7d66e6beeb80fab20f0625975ae173a0a48604b0 | [] | no_license | sun3shines/ufs | 12a635b67a7c39eb56d2e691038f4690c390a72c | 7fa7ebdefc245dceca4a1e7f394cc6e5640b71b7 | refs/heads/master | 2021-01-18T23:49:50.292917 | 2016-03-28T22:30:45 | 2016-03-28T22:30:45 | 53,540,708 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,247 | py | # -*- coding: utf-8 -*-
import json
from cloudcommon.common.bufferedhttp import jresponse
from cloudcommon.common.common.swob import Response
from ufs.utils.path import path2o
from ufs.interface.fst import FSt
def get(req):
param = json.loads(req.body)
path = path2o(param.get('path'))
s = FSt(path)
if not s.exists:
return Response(status=404)
app_iter = s.get()
response = Response(app_iter=app_iter,request=req)
return req.get_response(response)
def put(req):
param = req.headers
path = path2o(param.get('path'))
s = FSt(path)
md5 = req.headers.get('md5')
datatype = req.headers.get('datatype')
fileinput = req.environ['wsgi.input']
ecode = s.put(md5,datatype,fileinput)
return Response(status=ecode)
def post(req):
param = json.loads(req.body)
path = path2o(param.get('path'))
s = FSt(path)
if not s.exists:
return Response(status = 404)
attrs = req.headers
ecode = s.setm(attrs)
return Response(status=ecode)
def head(req):
param = json.loads(req.body)
path = path2o(param.get('path'))
is_swift = param.get('is_swift')
s = FSt(path)
    ecode = 200
    if not s.exists:
        ecode = 404
        return Response(status=ecode)
data = s.getm()
if 'true' == is_swift:
return Response(status=ecode,headers=data)
else:
return Response(json.dumps(data),status=ecode)
def copy(req):
param = json.loads(req.body)
src = path2o(param.get('src'))
dst = path2o(param.get('dst'))
s = FSt(src)
d = FSt(dst)
if not s.exists:
return Response(status=404)
ecode = s.copy(d)
return Response(status = ecode)
def move(req):
param = json.loads(req.body)
src = path2o(param.get('src'))
dst = path2o(param.get('dst'))
s = FSt(src)
d = FSt(dst)
if not s.exists:
return Response(status=404)
ecode = s.move(d)
return Response(status=ecode)
def delete(req):
param = json.loads(req.body)
path = path2o(param.get('path'))
s = FSt(path)
if not s.exists:
return Response(status=404)
ecode = s.delete()
return Response(status=ecode)
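# Example (illustrative; the paths are placeholders): a `copy` request carries
# a JSON body such as {"src": "/a/c/x.txt", "dst": "/a/c/y.txt"} and answers
# 404 when the source object does not exist.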
| [
"[email protected]"
] | |
ae870734f57f441d0112a8161a621c42015ae31e | c5d87c7f25e3fe9b17c1e88993b0ed6831e52acb | /N_NumpyDemo/NumpyDemo6.py | fb9ff8771e9c2fc83e38fa3c8989b3ca34a5357e | [] | no_license | GIS90/python_base_use | e55d55f9df505dac45ddd332fb65dcd08e8e531f | 7166ca85975bb7c56a5fbb6b723fd8300c4dd5d1 | refs/heads/master | 2020-04-02T08:33:49.461307 | 2018-10-23T03:33:41 | 2018-10-23T03:33:41 | 154,249,857 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | # -*- coding: utf-8 -*-
"""
------------------------------------------------
describe:
------------------------------------------------
"""
import numpy as np
# arr1 is a (6, 1) column vector, arr2 a (5,) row
arr1 = np.arange(0, 60, 10).reshape(-1, 1)
arr2 = np.arange(0, 5)
print arr1.shape, arr2.shape
# broadcasting stretches both operands to a common (6, 5) shape
c = arr1 + arr2
# a = arr2.repeat(6, axis=0)
# print a
# x, y = np.ogrid[0:5, 0:5]
# print x, y
#
# a = np.arange(6).reshape(2, 3)
# b = np.arange(6, 12).reshape(3, 2)
# c = np.dot(a, b)
# print c
a = np.arange(0, 12).reshape(3, 4)
# tofile writes raw binary by default; pass sep=" " for a readable text file
a.tofile("array.txt")
| [
"[email protected]"
] | |
6f83cb13a9d62661ea4d59a51c90be850916f02b | 7cc86034b847fcf239ef8184aa7c062b35f28d08 | /venv/bin/python-config | 41e0320aea18839a8bdd9f35b36863b3e87a44ea | [] | no_license | asimonia/headlines | a6b480e6d1a029d1890f4abd8153fdfbc3cbfcb8 | 43cde9690d8669b82c024788392551ed07a7763b | refs/heads/master | 2021-01-17T16:09:27.467950 | 2016-07-20T00:05:12 | 2016-07-20T00:05:12 | 63,735,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,367 | #!/Users/alexsimonian/Desktop/Example/chapter5/venv/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
sys.stderr.write("Usage: {0} [{1}]\n".format(
sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
| [
"[email protected]"
] | ||
a6e7e3c5d2a02e001d1ac9086bb44e9729676268 | d8edd97f8f8dea3f9f02da6c40d331682bb43113 | /dataset_real_64.py | 28aa5d22487066f0e4d762be774ef937a5159856 | [] | no_license | mdubouch/noise-gan | bdd5b2fff3aff70d5f464150443d51c2192eeafd | 639859ec4a2aa809d17eb6998a5a7d217559888a | refs/heads/master | 2023-07-15T09:37:57.631656 | 2021-08-27T11:02:45 | 2021-08-27T11:02:45 | 284,072,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,404 | py | import uproot3 as uproot
import torch
import logging
import numpy as np
import sklearn.preprocessing as skp
class Data():
def __init__(self):
self.file = uproot.open('reconstructible_mc5a02_rconsthits_geom.root')
self.n_pot = 990678399
self.n_bunches = self.n_pot / 16e6
self.cdc_tree = self.file['cdc_geom/wires']
self.data_tree = self.file['noise/noise']
self.qt = skp.QuantileTransformer(output_distribution='normal', n_quantiles=5000)
self.minmax = skp.MinMaxScaler(feature_range=(-1, 1))
def get_cdc_tree(self):
return self.cdc_tree.array()
def load(self):
self.data = self.data_tree.array()
tree = self.data
# Set up the data, throw away the invalid ones
wire = tree['wire']
event_id = tree['event_id']
layer = tree['layer']
edep = tree['edep']
doca = tree['doca']
t = tree['t']
dbg_x = tree['x']
dbg_y = tree['y']
dbg_z = tree['z']
track_id = tree['track_id']
pid = tree['pid']
_select = (wire>=0) * (edep>1e-6)
self.layer = layer[_select]
self.event_id = event_id[_select]
self.t = t[_select]
self.dbg_x = dbg_x[_select]
self.dbg_y = dbg_y[_select]
self.dbg_z = dbg_z[_select]
self.track_id = track_id[_select]
self.pid = pid[_select]
self.doca = doca[_select]
self.wire = wire[_select]
self.edep = edep[(tree['wire']>=0) * (edep>1e-6)]
self.test_edep = self.edep[-1*2048:]
self.test_t = self.t[-1*2048:]
self.test_doca = self.doca[-1*2048:]
self.test_wire = self.wire[-1*2048:]
self.edep = self.edep[:64*2048]
self.t = self.t[:64*2048]
self.doca = self.doca[:64*2048]
self.wire = self.wire[:64*2048]
logging.info('Train size wire %d pid %d doca %d' % (self.wire.size, self.pid.size, self.doca.size))
# Format data into tensor
self.train = np.array([self.edep, self.t, self.doca], dtype=np.float32).T
self.test = np.array([self.test_edep, self.test_t, self.test_doca], dtype=np.float32).T
logging.info('train shape {0}'.format(self.train.shape))
logging.info('test shape {0}'.format(self.test.shape))
def preprocess(self):
self.qt.fit(self.train)
self.train_qt = self.qt.transform(self.train)
self.test_qt = self.qt.transform(self.test)
self.minmax.fit(self.train_qt)
self.train_minmax = self.minmax.transform(self.train_qt)
self.test_minmax = self.minmax.transform(self.test_qt)
def inv_preprocess(self, tensor):
inv_tensor = self.qt.inverse_transform(
self.minmax.inverse_transform(tensor.detach().cpu().numpy()))
return torch.tensor(inv_tensor)
def diagnostic_plots(self, output_dir):
# Some diagnostic plots
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
plt.hist(np.log10(self.edep), bins=50)
plt.savefig(output_dir+'train_edep.png')
plt.clf()
plt.hist(np.log10(self.t), bins=50)
plt.savefig(output_dir+'train_t.png')
plt.clf()
plt.hist(self.doca, bins=50)
plt.savefig(output_dir+'train_doca.png')
plt.clf()
#plt.figure(figsize=(6,6))
#plt.scatter(dbg_z, dbg_y, s=edep*1e3, c=doca, cmap='inferno')
#plt.savefig(output_dir+'train_scatter.png')
#plt.clf()
__inv_train = self.inv_preprocess(torch.from_numpy(self.train_minmax))
plt.hist(np.log10(__inv_train[:,0]), bins=50, alpha=0.7)
plt.hist(np.log10(self.edep), bins=50, alpha=0.7)
plt.savefig(output_dir+'inv_transform.png')
plt.clf()
def _chunk(self, continuous_features, discrete_features, seq_len, batch_size):
data_torch = torch.from_numpy(continuous_features).T
chunk_size = seq_len
chunk_stride = seq_len
data_chunked = data_torch.unfold(1,
chunk_size, chunk_stride) # (feature, batch, seq)
n_chunks = data_chunked.shape[1]
n_features = data_chunked.shape[0]
wire_torch = torch.from_numpy(discrete_features).long().unsqueeze(0)
wire_chunked = wire_torch.unfold(1,
chunk_size, chunk_stride) # (feature, batch, seq)
logging.info('Continuous features shape: {0} Discrete features shape: {1}'.format(data_chunked.shape, wire_chunked.shape))
dataset = torch.utils.data.TensorDataset(data_chunked.permute(1, 0, 2),
wire_chunked.permute(1, 0, 2))
loader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size, shuffle=True, pin_memory=True)
return loader, dataset, n_chunks
def chunk(self, seq_len, batch_size):
self.train_loader, self.train_dataset, self.n_chunks = self._chunk(self.train_minmax,
self.wire, seq_len, batch_size)
self.test_loader, self.test_dataset, self.n_test_chunks = self._chunk(self.test_minmax,
self.test_wire, seq_len, batch_size)
print('TRAIN CHUNKS:', self.n_chunks, ', TEST CHUNKS:', self.n_test_chunks)
return self.train_loader, self.train_dataset, self.n_chunks
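# Minimal usage sketch (seq_len and batch_size below are assumptions):
#   data = Data()
#   data.load()
#   data.preprocess()
#   loader, dataset, n_chunks = data.chunk(seq_len=256, batch_size=32)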
| [
"[email protected]"
] | |
40085d38f183f16add546c39dfb8ee6928005916 | a1ce33bcd9bde0e044c88dbd591774952f0469dc | /unidade-2/media-aluno/media_aluno.py | bea1a5d91fd8c970eea39a344127e9e33f268034 | [] | no_license | juliafealves/tst-lp1 | 91420522fcc9e3f66ad451aa4c500f5df24e51b7 | 1f324d66429a95796e02f718f0cd35dd7e7bd4a2 | refs/heads/master | 2021-09-07T11:08:34.420830 | 2018-02-22T02:40:50 | 2018-02-22T02:40:50 | 107,728,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | # coding: utf-8
# Student Grade Average
# (C) 2017, Júlia Alves / UFCG Programação 1
peso1 = 2.0
peso2 = 3.0
peso3 = 5.0
# Read the three grades from input.
nota1 = float(raw_input())
nota2 = float(raw_input())
nota3 = float(raw_input())
# Compute the weighted average.
media = (nota1 * peso1 + nota2 * peso2 + nota3 * peso3) / 10
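# Example: grades 7.0, 8.0 and 9.0 give (7*2 + 8*3 + 9*5) / 10 = 8.3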
# Print the student's average.
print media
| [
"[email protected]"
] | |
8e2ec3b08050f7aff3bcbed97a713ccfccce0ee8 | 68c22afafa16c53ed61874ae13463d4b6831a695 | /examples/addresses.py | 7ac7986a76fe4cc8b1687b3b2aca93acf8ac9b7c | [
"MIT"
] | permissive | wielandgmeiner/embit | edc7500bfca15472a21e3df80c961efc740614bd | caebc88c749994f2eb704231ba20c7fa10d70d4d | refs/heads/master | 2020-12-19T21:57:42.027714 | 2020-01-23T22:35:49 | 2020-01-23T22:35:49 | 235,864,966 | 0 | 0 | null | 2020-01-23T19:00:23 | 2020-01-23T19:00:23 | null | UTF-8 | Python | false | false | 1,761 | py | from embit import script
from embit import ec
from embit.networks import NETWORKS
from binascii import unhexlify, hexlify
def main():
# all from the same private key
prv = ec.PrivateKey.from_wif("L2e5y14ZD3U1J7Yr62t331RtYe2hRW2TBBP8qNQHB8nSPBNgt6dM")
pub = prv.get_public_key()
print("Public key:")
print(hexlify(pub.serialize()))
# we will generate regtest addresses
network = NETWORKS['regtest']
print("Legacy (pay to pubkey hash):")
sc = script.p2pkh(pub)
# default network is main
print(sc.address(network))
print("Segwit (pay to witness pubkey hash):")
sc = script.p2wpkh(pub)
print(sc.address(network))
print("Nested segwit (p2sh-p2wpkh):")
sc = script.p2sh(script.p2wpkh(pub))
print(sc.address(network))
print("\nMiltisig address (2 of 3):")
# unsorted
pubs = [
ec.PublicKey.parse(unhexlify("02edd7a58d2ff1e483d35f92a32e53607423f936b29bf95613cab24b0b7f92e0f1")),
ec.PublicKey.parse(unhexlify("03a4a6d360acc45cb281e0022b03218fad6ee93881643488ae39d22b854d9fa261")),
ec.PublicKey.parse(unhexlify("02e1fdc3b011effbba4b0771eb0f7193dee24cfe101ab7e8b64516d83f7116a615")),
]
# 2 of 3 multisig script
sc = script.multisig(2, pubs)
print("Legacy, unsorted (p2sh):")
redeem_sc = script.p2sh(sc)
print(redeem_sc.address(network))
print("Native segwit, sorted (p2wsh):")
sc = script.multisig(2, sorted(pubs))
witness_sc = script.p2wsh(sc)
print(witness_sc.address(network))
print("Nested segwit, sorted (p2sh-p2wsh):")
sc = script.multisig(2, sorted(pubs))
witness_sc = script.p2wsh(sc)
redeem_sc = script.p2sh(witness_sc)
print(redeem_sc.address(network))
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
11e304d584b00995908893ca77719edba9e57c24 | c9500ad778b8521aaa85cb7fe3239989efaa4799 | /plugins/connectwise/icon_connectwise/actions/get_company/__init__.py | 38402cddccf5e64d5a3112fcab87cae6b54464fb | [
"MIT"
] | permissive | rapid7/insightconnect-plugins | 5a6465e720f114d71b1a82fe14e42e94db104a0b | 718d15ca36c57231bb89df0aebc53d0210db400c | refs/heads/master | 2023-09-01T09:21:27.143980 | 2023-08-31T10:25:36 | 2023-08-31T10:25:36 | 190,435,635 | 61 | 60 | MIT | 2023-09-14T08:47:37 | 2019-06-05T17:05:12 | Python | UTF-8 | Python | false | false | 71 | py | # GENERATED BY KOMAND SDK - DO NOT EDIT
from .action import GetCompany
| [
"[email protected]"
] | |
88a3377b8c3c25f15919717ee9a53cf77e86855d | 528da3624bb03db22c4b97870de0bfd0205c5908 | /CODETOOL/search.py | 3a911964be67b136b0e44d85164102610be01966 | [
"MIT"
] | permissive | ihgazni2/shproperty | e4f6e011a1fe03695ffd1f5933deb638a481490e | fc994d8228d20d00dbdc060941fce81a56bba673 | refs/heads/master | 2020-04-13T02:22:45.413052 | 2019-09-19T09:26:48 | 2019-09-19T09:26:48 | 162,900,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,052 | py |
import sys
import os
import subprocess
import shlex
def pipe_shell_cmds(shell_CMDs):
    '''
    shell_CMDs = {}
    shell_CMDs[1] = 'netstat -n'
    shell_CMDs[2] = "awk {'print $6'}"
    '''
    # number of commands in the pipeline (avoid shadowing the builtin len)
    cmd_count = len(shell_CMDs)
    p = {}
    p[1] = subprocess.Popen(shlex.split(shell_CMDs[1]), stdout=subprocess.PIPE,stderr=subprocess.PIPE)
    # chain stdin of each command to stdout of the previous one
    for i in range(2,cmd_count):
        p[i] = subprocess.Popen(shlex.split(shell_CMDs[i]), stdin=p[i-1].stdout, stdout=subprocess.PIPE,stderr=subprocess.PIPE)
    if(cmd_count > 1):
        p[cmd_count] = subprocess.Popen(shlex.split(shell_CMDs[cmd_count]), stdin=p[cmd_count-1].stdout, stdout=subprocess.PIPE,stderr=subprocess.PIPE)
    # read stdout/stderr of the last command, then reap all children
    result = p[cmd_count].communicate()
    if(cmd_count > 1):
        for i in range(2,cmd_count+1):
            returncode = p[i].wait()
    else:
        returncode = p[cmd_count].wait()
    return result
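# Example (sketch): the shell pipeline `netstat -n | awk '{print $6}'`
#   out, err = pipe_shell_cmds({1: 'netstat -n', 2: "awk '{print $6}'"})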
#dname="./Main" dname="./edict"
def check(fname,suffix='js',dname="./"):
fl = []
shell_CMDs = {}
shell_CMDs[1] = 'tree -f ' + dname
shell_CMDs[2] = 'egrep ' + '"' + suffix +'|sh"'
shell_CMDs[3] = 'egrep ""'
rslt = pipe_shell_cmds(shell_CMDs)[0].decode('utf-8')
fl = rslt.replace(chr(9500),"").replace(chr(9472),"").replace(chr(9474),"").replace("\xa0","").replace(chr(9492),"").split("\n")
for i in range(0,fl.__len__() -1):
ele = fl[i].strip(' ').strip('\t').strip(' ').strip('\t')
fl[i] = 'cat ' + ele
fl.pop(fl.__len__() -1)
for cmd in fl:
shell_CMDs = {}
shell_CMDs[1] = cmd
shell_CMDs[2] = "egrep " + '"' + fname + '"'
rslt = pipe_shell_cmds(shell_CMDs)
if(rslt == (b'', b'')):
pass
else:
print("---location---")
print(cmd)
print("---rslt----")
print(rslt[0].decode('utf-8'))
print("----info---")
print(rslt[1].decode('utf-8'))
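# Example (sketch): grep every .py file under ./src for "myFunc"
#   python search.py myFunc py ./src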
try:
check(sys.argv[1],sys.argv[2],sys.argv[3])
except:
try:
check(sys.argv[1],sys.argv[2])
except:
check(sys.argv[1])
else:
pass
else:
pass
| [
"[email protected]"
] | |
972c53032105db50a0fd1fedbf84567a14467411 | bc441bb06b8948288f110af63feda4e798f30225 | /influxdb_service_sdk/model/cmdb_extend/subsystem_dependency_pb2.pyi | dbfaa61f6584c29813088feb8ee25ba84930e6f6 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,360 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from influxdb_service_sdk.model.cmdb_extend.app_dependency_pb2 import (
AppDependency as influxdb_service_sdk___model___cmdb_extend___app_dependency_pb2___AppDependency,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class SubsystemDependency(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
class ConnectSubsystems(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
abbreviation = ... # type: typing___Text
object_id = ... # type: typing___Text
instance_id = ... # type: typing___Text
name = ... # type: typing___Text
def __init__(self,
*,
abbreviation : typing___Optional[typing___Text] = None,
object_id : typing___Optional[typing___Text] = None,
instance_id : typing___Optional[typing___Text] = None,
name : typing___Optional[typing___Text] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> SubsystemDependency.ConnectSubsystems: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> SubsystemDependency.ConnectSubsystems: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"abbreviation",b"abbreviation",u"instance_id",b"instance_id",u"name",b"name",u"object_id",b"object_id"]) -> None: ...
abbreviation = ... # type: typing___Text
name = ... # type: typing___Text
object_id = ... # type: typing___Text
instance_id = ... # type: typing___Text
@property
def components(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[influxdb_service_sdk___model___cmdb_extend___app_dependency_pb2___AppDependency]: ...
@property
def connect_subsystems(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[SubsystemDependency.ConnectSubsystems]: ...
def __init__(self,
*,
abbreviation : typing___Optional[typing___Text] = None,
name : typing___Optional[typing___Text] = None,
object_id : typing___Optional[typing___Text] = None,
instance_id : typing___Optional[typing___Text] = None,
components : typing___Optional[typing___Iterable[influxdb_service_sdk___model___cmdb_extend___app_dependency_pb2___AppDependency]] = None,
connect_subsystems : typing___Optional[typing___Iterable[SubsystemDependency.ConnectSubsystems]] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> SubsystemDependency: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> SubsystemDependency: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"abbreviation",b"abbreviation",u"components",b"components",u"connect_subsystems",b"connect_subsystems",u"instance_id",b"instance_id",u"name",b"name",u"object_id",b"object_id"]) -> None: ...
| [
"[email protected]"
] | |
578bad733cfd9b560cf88476f651afaf40a28da1 | 68d38b305b81e0216fa9f6769fe47e34784c77f2 | /auto_generate_scripts/server/script_generator/pcadvisor_co_uk_generator.py | 981b44af8e52193c9704c79a9c7b23127324867a | [] | no_license | ADJet1437/ScrapyProject | 2a6ed472c7c331e31eaecff26f9b38b283ffe9c2 | db52844411f6dac1e8bd113cc32a814bd2ea3632 | refs/heads/master | 2022-11-10T05:02:54.871344 | 2020-02-06T08:01:17 | 2020-02-06T08:01:17 | 237,448,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,669 | py | # -*- coding: utf8 -*-
import sys
sys.path.append("../")
from server.gen_spiders import *
code_fragments = []
spa = SpiderGenerator()
return_code = spa.gen_import()
code_fragments.append(return_code)
return_code = spa.gen_init(spider_name = "Pcadvisor_co_ukSpider", spider_type = "AlaSpider", allowed_domains = "'pcadvisor.co.uk'", start_urls = "'http://www.pcadvisor.co.uk/review/'")
code_fragments.append(return_code)
return_code = spa.gen_level(level_index = "1", need_parse_javascript = "")
code_fragments.append(return_code)
return_code = spa.gen_request_single_url(url_xpath = "//a[starts-with(.,'>>')]/@href", level_index = "1", url_regex = "", product_fields = [])
code_fragments.append(return_code)
return_code = spa.gen_request_urls(urls_xpath = "//div[@class='bd']/h2/a/@href", level_index = "2", url_regex = "", include_original_url = "", params_xpath = {}, params_regex = {})
code_fragments.append(return_code)
return_code = spa.gen_level(level_index = "2", need_parse_javascript = "")
code_fragments.append(return_code)
return_code = spa.get_category(category_leaf_xpath = "//ul[contains(@class,'crumb')]/li[last()-1]/a//text()", category_path_xpath = "//ul[contains(@class,'crumb')]/li[position()<last()]/a//text()")
code_fragments.append(return_code)
return_code = spa.gen_product(sii_xpath = "//meta[@property='og:url']/@content", pname_xpath = "//ul[contains(@class,'crumb')]/li[last()]/a//text()", ocn_xpath = "//ul[contains(@class,'crumb')]/li[last()-1]/a//text()", pic_xpath = "//meta[@property='og:image']/@content", manuf_xpath = "")
code_fragments.append(return_code)
return_code = spa.gen_review(sii_xpath = "//meta[@property='og:url']/@content", pname_xpath = "//ul[contains(@class,'crumb')]/li[last()]/a//text()", rating_xpath = "translate(string(number(count(//img[@class='ratings' and contains(@src,'whitestarfilled')])+0.5*count(//img[@class='ratings' and contains(@src,'whiteHalfStar')]))),'0','')", date_xpath = "//time/@datetime", pros_xpath = "", cons_xpath = "", summary_xpath = "//meta[@property='og:description']/@content", verdict_xpath = "//*[contains(.,'VERDICT')]/following-sibling::p[1]/text()", author_xpath = "//meta[@name='author']/@content", title_xpath = "//meta[@property='og:title']/@content", award_xpath = "", awpic_xpath = "")
code_fragments.append(return_code)
return_code = spa.get_dbasecategoryname(dbcn = "pro")
code_fragments.append(return_code)
return_code = spa.get_sourcetestscale(scale = "5", review_type = "pro")
code_fragments.append(return_code)
return_code = spa.clean_field(type = "product", field = "source_internal_id", regex = "((?<=\-)\d{7}(?=\/))", review_type = "pro")
code_fragments.append(return_code)
return_code = spa.clean_field(type = "review", field = "source_internal_id", regex = "((?<=\-)\d{7}(?=\/))", review_type = "pro")
code_fragments.append(return_code)
return_code = spa.clean_field(type = "product", field = "ProductName", regex = "(\w.*(?=\sreview))", review_type = "pro")
code_fragments.append(return_code)
return_code = spa.clean_field(type = "review", field = "ProductName", regex = "(\w.*(?=\sreview))", review_type = "pro")
code_fragments.append(return_code)
return_code = spa.clean_field(type = "review", field = "TestDateText", regex = "(\d[^/s]*(?=T))", review_type = "pro")
code_fragments.append(return_code)
return_code = spa.save_product()
code_fragments.append(return_code)
return_code = spa.save_review(review_type = "pro")
code_fragments.append(return_code)
script_name = "/home/alascrapy/alaScrapy/alascrapy/spiders/techadvisor_co_uk.py"
fh = open(script_name, 'w+')
for code in code_fragments:
fh.write(code)
fh.write("")
fh.close()
| [
"[email protected]"
] | |
a746e06768428227791bfa75408479c3b234fd09 | 7246faf9a222269ce2612613f58dc5ff19091f10 | /프로그래머스/프로그래머스 - 단어 변환.py | dec405ba20f05310d904b9b961854c446de5742f | [] | no_license | gusdn3477/Algorithm_Study | 87a2eb72a8488d9263a86db70dadc7944434d41d | 3fefe1dcb40122157845ffc542f41cb097711cc8 | refs/heads/main | 2023-08-30T12:18:21.412945 | 2021-09-28T13:00:11 | 2021-09-28T13:00:11 | 308,364,230 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | from collections import deque
def BFS(begin, target, words):
    # breadth-first search over words; queue entries are (word, depth)
    queue = deque()
    queue.append((begin, 0))
    visited = [0] * len(words)
    while queue:
        a = queue.popleft()
        if a[0] == target:
            return a[1]
        for i in range(len(words)):
            # count matching positions; two words are neighbours when they
            # differ in exactly one letter
            ct = 0
            for j in range(len(words[i])):
                if a[0][j] == words[i][j]:
                    ct += 1
            if ct == len(words[i]) - 1 and visited[i] == 0:
                visited[i] = 1
                queue.append((words[i], a[1] + 1))
    # target is unreachable from begin
    return 0
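# Example: BFS("hit", "cog", ["hot", "dot", "dog", "lot", "log", "cog"])
# returns 4 via the chain hit -> hot -> dot -> dog -> cog.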
def solution(begin, target, words):
answer = BFS(begin, target, words)
return answer | [
"[email protected]"
] | |
4f151b429aa31d456b30c77bec814947baa83c6c | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog_tags/initial_8783.py | 7c63375310d5ee9587704b93f42ed2a9a09dec62 | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,334 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog1_Anch" not in marker_sets:
s=new_marker_set('Cog1_Anch')
marker_sets["Cog1_Anch"]=s
s= marker_sets["Cog1_Anch"]
mark=s.place_marker((173, 270, 272), (0, 0, 1), 21.9005)
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((211, 644, 751), (1, 0.5, 0), 21.9005)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((469, 622, 469), (1, 0.5, 0), 21.9005)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((510, 921, 965), (1, 0.5, 0), 21.9005)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((579, 663, 401), (1, 0.87, 0), 21.9005)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((5, 737, 745), (1, 0.87, 0), 21.9005)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((735, 394, 483), (1, 0.87, 0), 21.9005)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((144, 838, 695), (0.97, 0.51, 0.75), 21.9005)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((384, 179, 480), (0.97, 0.51, 0.75), 21.9005)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((696, 10, 985), (0.97, 0.51, 0.75), 21.9005)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((614, 306, 548), (0.39, 0.31, 0.14), 21.9005)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((185, 810, 708), (0.39, 0.31, 0.14), 21.9005)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((273, 108, 982), (0.39, 0.31, 0.14), 21.9005)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((927, 982, 765), (0.6, 0.31, 0.64), 21.9005)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((478, 451, 432), (0.6, 0.31, 0.64), 21.9005)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((227, 697, 680), (0.6, 0.31, 0.64), 21.9005)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((696, 692, 471), (0.89, 0.1, 0.1), 21.9005)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((429, 560, 934), (0.89, 0.1, 0.1), 21.9005)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((568, 585, 767), (0.89, 0.1, 0.1), 21.9005)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((869, 113, 920), (0.3, 0.69, 0.29), 21.9005)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((187, 316, 608), (0.3, 0.69, 0.29), 21.9005)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"[email protected]"
] | |
10d5e08df192fe85faf6f5f7bed5a0067b89a403 | cea03b578f0f6207afe5056611090848ab76bd23 | /subject/qiefenhuisu.py | 6167beacc3d6e3371e6602df397b51dc1892fe58 | [] | no_license | swq90/stock | fa295f4fa0bf6a4d8afe8a71c02cc54fc7442bcd | a49ae395de82ecdfa38220f4fdbcaf4da6a39719 | refs/heads/master | 2021-07-14T03:42:21.950897 | 2020-10-14T11:52:24 | 2020-10-14T11:52:24 | 215,325,863 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,352 | py | import pandas as pd
from numpy import arange
import stock.util.sheep as sheep
import stock.limit_up.get_limit_stock as gls
from stock.sql.data import save_data,read_data
from stock.util.basic import basic
start_date='20180101'
end_date='20181231'
PRICEB='close'
def fun1(limit_type='up'):
res = pd.DataFrame()
#
data=read_data('daily',start_date=start_date,end_date=end_date)
limit=read_data('stk_limit',start_date=start_date,end_date=end_date)
data=data.merge(limit,on=['ts_code','trade_date'])
    # flag limit-up closes as 99 and limit-down closes as 1; otherwise keep pct_chg
    data['is_roof'] = data.apply(lambda x: 99 if x['close'] == x['up_limit' ] else 1 if x['close'] == x[
        'down_limit'] else x['pct_chg'], axis=1)
for rate in [-99]+list(range(-10,10))+[99]:
print(rate)
df=data.loc[(data['is_roof']>=rate)&(data['is_roof']<(rate+1))].copy()
if df.empty:
continue
# df['pct']=(df['close']/df['open']-1)*100
# res.loc[rate,'pct']=df['pct'].mean()
wool=sheep.wool2(df[['ts_code','trade_date']],data,PRICEB=PRICEB,days=1)
res.loc[rate,'mean']=wool.iloc[:,-3].mean()
res.loc[rate, 'n'] = wool.iloc[-1, -2]
res.loc[rate, 'all_pct'] = wool.iloc[-1, -1]
save_data(res,'pct_chg_cut_res%s-%s.csv'%(start_date,end_date))
def cut_stock():
pass
def func2():
data=read_data('stock_basic')
fun1()
print() | [
"[email protected]"
] | |
7833aa53c04a242bf777342ba9ef4c4e33a99fb4 | ef54d37f8a3303013ca7469871a320d303957ed7 | /robo4.2/fusion/tests/RIST/API/Deprecated/F172/F172-BB-NegativeTests_DCS.py | 640d1047e65082453fc1cf09d069f13ba4ecbabb | [] | no_license | richa92/Jenkin_Regression_Testing | d18badfcf16bda682dfe7bcbbd66f54a9a27a58d | 24a74926170cbdfafa47e972644e2fe5b627d8ff | refs/heads/master | 2020-07-12T10:01:59.099137 | 2019-08-27T12:14:53 | 2019-08-27T12:14:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | admin_credentials = {'userName': 'Administrator', 'password': 'wpsthpvse1'}
enclosure_name = "0000A66101"
drive_enclosure_name = "0000A66101, bay 7"
expected_number_of_DE = 6
expected_number_of_drives = 4
| [
"[email protected]"
] | |
ada7780e5b4601acc875f95342e323451cfa5981 | 709412f8e5ddd778f7b007ba9a6e1c1dd6924425 | /tools/foozzie/v8_foozzie.py | 222a3d8cfb2600817275ad9617a56d77dd547a31 | [
"BSD-3-Clause",
"bzip2-1.0.6",
"SunPro"
] | permissive | WilliamVV/v8 | f0737c5dfda0e0c11c6fffb735de7dc7d043afc1 | 2a60fd49db2d787f0dd6a7a731485011ec89d35f | refs/heads/master | 2021-04-29T03:45:07.298299 | 2017-01-04T10:24:48 | 2017-01-04T10:24:48 | 78,011,848 | 1 | 0 | null | 2017-01-04T12:00:53 | 2017-01-04T12:00:52 | null | UTF-8 | Python | false | false | 9,072 | py | #!/usr/bin/env python
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
V8 correctness fuzzer launcher script.
"""
import argparse
import hashlib
import itertools
import json
import os
import re
import sys
import traceback
import v8_commands
import v8_suppressions
CONFIGS = dict(
default=[],
validate_asm=['--validate-asm'], # Maybe add , '--disable-asm-warnings'
fullcode=['--nocrankshaft', '--turbo-filter=~'],
noturbo=['--turbo-filter=~', '--noturbo-asm'],
noturbo_opt=['--always-opt', '--turbo-filter=~', '--noturbo-asm'],
ignition_staging=['--ignition-staging'],
ignition_turbo=['--ignition-staging', '--turbo'],
ignition_turbo_opt=['--ignition-staging', '--turbo', '--always-opt'],
)
# Timeout in seconds for one d8 run.
TIMEOUT = 3
# Return codes.
RETURN_PASS = 0
RETURN_FAIL = 2
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
PREAMBLE = [
os.path.join(BASE_PATH, 'v8_mock.js'),
os.path.join(BASE_PATH, 'v8_suppressions.js'),
]
FLAGS = ['--abort_on_stack_overflow', '--expose-gc', '--allow-natives-syntax',
'--invoke-weak-callbacks', '--omit-quit', '--es-staging']
SUPPORTED_ARCHS = ['ia32', 'x64', 'arm', 'arm64']
# Output for suppressed failure case.
FAILURE_HEADER_TEMPLATE = """#
# V8 correctness failure
# V8 correctness configs: %(configs)s
# V8 correctness sources: %(sources)s
# V8 correctness suppression: %(suppression)s
"""
# Extended output for failure case. The 'CHECK' is for the minimizer.
FAILURE_TEMPLATE = FAILURE_HEADER_TEMPLATE + """#
# CHECK
#
# Compared %(first_config_label)s with %(second_config_label)s
#
# Flags of %(first_config_label)s:
%(first_config_flags)s
# Flags of %(second_config_label)s:
%(second_config_flags)s
#
# Difference:
%(difference)s
#
### Start of configuration %(first_config_label)s:
%(first_config_output)s
### End of configuration %(first_config_label)s
#
### Start of configuration %(second_config_label)s:
%(second_config_output)s
### End of configuration %(second_config_label)s
"""
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--random-seed', type=int, required=True,
help='random seed passed to both runs')
parser.add_argument(
'--first-arch', help='first architecture', default='x64')
parser.add_argument(
'--second-arch', help='second architecture', default='x64')
parser.add_argument(
'--first-config', help='first configuration', default='fullcode')
parser.add_argument(
'--second-config', help='second configuration', default='fullcode')
parser.add_argument(
'--first-d8', default='d8',
help='optional path to first d8 executable, '
'default: bundled in the same directory as this script')
parser.add_argument(
'--second-d8',
help='optional path to second d8 executable, default: same as first')
parser.add_argument('testcase', help='path to test case')
options = parser.parse_args()
# Ensure we make a sane comparison.
assert (options.first_arch != options.second_arch or
options.first_config != options.second_config) , (
'Need either arch or config difference.')
assert options.first_arch in SUPPORTED_ARCHS
assert options.second_arch in SUPPORTED_ARCHS
assert options.first_config in CONFIGS
assert options.second_config in CONFIGS
# Ensure we have a test case.
assert (os.path.exists(options.testcase) and
os.path.isfile(options.testcase)), (
'Test case %s doesn\'t exist' % options.testcase)
options.meta_data_path = os.path.join(
os.path.dirname(options.testcase),
'meta' + os.path.basename(options.testcase)[len('fuzz'):])
assert os.path.exists(options.meta_data_path), (
'Metadata %s doesn\'t exist' % options.meta_data_path)
# Use first d8 as default for second d8.
options.second_d8 = options.second_d8 or options.first_d8
# Ensure absolute paths.
if not os.path.isabs(options.first_d8):
options.first_d8 = os.path.join(BASE_PATH, options.first_d8)
if not os.path.isabs(options.second_d8):
options.second_d8 = os.path.join(BASE_PATH, options.second_d8)
# Ensure executables exist.
assert os.path.exists(options.first_d8)
assert os.path.exists(options.second_d8)
# Ensure we use different executables when we claim we compare
# different architectures.
# TODO(machenbach): Infer arch from gn's build output.
if options.first_arch != options.second_arch:
assert options.first_d8 != options.second_d8
return options
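# Example invocation (sketch; the d8 path and test case are placeholders):
#   python v8_foozzie.py --random-seed 12345 --first-config fullcode \
#     --second-config ignition_turbo --first-d8 out/x64.release/d8 fuzz-123.js
# (a matching meta-123.js metadata file must sit next to the test case)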
def test_pattern_bailout(testcase, ignore_fun):
"""Print failure state and return if ignore_fun matches testcase."""
with open(testcase) as f:
bug = (ignore_fun(f.read()) or '').strip()
if bug:
print FAILURE_HEADER_TEMPLATE % dict(
configs='', sources='', suppression=bug)
return True
return False
def pass_bailout(output, step_number):
"""Print info and return if in timeout or crash pass states."""
if output.HasTimedOut():
# Dashed output, so that no other clusterfuzz tools can match the
# words timeout or crash.
print '# V8 correctness - T-I-M-E-O-U-T %d' % step_number
return True
if output.HasCrashed():
print '# V8 correctness - C-R-A-S-H %d' % step_number
return True
return False
def fail_bailout(output, ignore_by_output_fun):
"""Print failure state and return if ignore_by_output_fun matches output."""
bug = (ignore_by_output_fun(output.stdout) or '').strip()
if bug:
print FAILURE_HEADER_TEMPLATE % dict(
configs='', sources='', suppression=bug)
return True
return False
def main():
options = parse_args()
# Suppressions are architecture and configuration specific.
suppress = v8_suppressions.get_suppression(
options.first_arch, options.first_config,
options.second_arch, options.second_config,
)
if test_pattern_bailout(options.testcase, suppress.ignore):
return RETURN_FAIL
# Get metadata.
with open(options.meta_data_path) as f:
metadata = json.load(f)
common_flags = FLAGS + ['--random-seed', str(options.random_seed)]
first_config_flags = common_flags + CONFIGS[options.first_config]
second_config_flags = common_flags + CONFIGS[options.second_config]
def run_d8(d8, config_flags):
args = [d8] + config_flags + PREAMBLE + [options.testcase]
if d8.endswith('.py'):
# Wrap with python in tests.
args = [sys.executable] + args
return v8_commands.Execute(
args,
cwd=os.path.dirname(options.testcase),
timeout=TIMEOUT,
)
first_config_output = run_d8(options.first_d8, first_config_flags)
# Early bailout based on first run's output.
if pass_bailout(first_config_output, 1):
return RETURN_PASS
if fail_bailout(first_config_output, suppress.ignore_by_output1):
return RETURN_FAIL
second_config_output = run_d8(options.second_d8, second_config_flags)
# Bailout based on second run's output.
if pass_bailout(second_config_output, 2):
return RETURN_PASS
if fail_bailout(second_config_output, suppress.ignore_by_output2):
return RETURN_FAIL
difference = suppress.diff(
first_config_output.stdout, second_config_output.stdout)
if difference:
# The first three entries will be parsed by clusterfuzz. Format changes
# will require changes on the clusterfuzz side.
first_config_label = '%s,%s' % (options.first_arch, options.first_config)
second_config_label = '%s,%s' % (options.second_arch, options.second_config)
hsh = lambda x: hashlib.sha1(x).hexdigest()[:8]
print FAILURE_TEMPLATE % dict(
configs='%s:%s' % (first_config_label, second_config_label),
sources=','.join(map(hsh, metadata['sources'])),
suppression='', # We can't tie bugs to differences.
first_config_label=first_config_label,
second_config_label=second_config_label,
first_config_flags=' '.join(first_config_flags),
second_config_flags=' '.join(second_config_flags),
first_config_output=first_config_output.stdout,
second_config_output=second_config_output.stdout,
difference=difference,
)
return RETURN_FAIL
# TODO(machenbach): Figure out if we could also return a bug in case there's
# no difference, but one of the line suppressions has matched - and without
# the match there would be a difference.
print '# V8 correctness - pass'
return RETURN_PASS
if __name__ == "__main__":
try:
result = main()
except SystemExit:
# Make sure clusterfuzz reports internal errors and wrong usage.
# Use one label for all internal and usage errors.
print FAILURE_HEADER_TEMPLATE % dict(
configs='', sources='', suppression='wrong_usage')
result = RETURN_FAIL
except Exception as e:
print FAILURE_HEADER_TEMPLATE % dict(
configs='', sources='', suppression='internal_error')
print '# Internal error: %s' % e
traceback.print_exc(file=sys.stdout)
result = RETURN_FAIL
sys.exit(result)
| [
"[email protected]"
] | |
45d304fdbcd33ec07e82461e41d163e4105d3a88 | 1bf9f6b0ef85b6ccad8cb029703f89039f74cedc | /src/spring/azext_spring/vendored_sdks/appplatform/v2022_03_01_preview/operations/_bindings_operations.py | a9db1b1704bec066e868d63aacc3d590fdf220d9 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | VSChina/azure-cli-extensions | a1f4bf2ea4dc1b507618617e299263ad45213add | 10b7bfef62cb080c74b1d59aadc4286bd9406841 | refs/heads/master | 2022-11-14T03:40:26.009692 | 2022-11-09T01:09:53 | 2022-11-09T01:09:53 | 199,810,654 | 4 | 2 | MIT | 2020-07-13T05:51:27 | 2019-07-31T08:10:50 | Python | UTF-8 | Python | false | false | 47,247 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
from urllib.parse import parse_qs, urljoin, urlparse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
resource_group_name: str, service_name: str, app_name: str, binding_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01-preview")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings/{bindingName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"serviceName": _SERIALIZER.url("service_name", service_name, "str"),
"appName": _SERIALIZER.url("app_name", app_name, "str"),
"bindingName": _SERIALIZER.url("binding_name", binding_name, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_create_or_update_request(
resource_group_name: str, service_name: str, app_name: str, binding_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01-preview")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings/{bindingName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"serviceName": _SERIALIZER.url("service_name", service_name, "str"),
"appName": _SERIALIZER.url("app_name", app_name, "str"),
"bindingName": _SERIALIZER.url("binding_name", binding_name, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_request(
resource_group_name: str, service_name: str, app_name: str, binding_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01-preview")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings/{bindingName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"serviceName": _SERIALIZER.url("service_name", service_name, "str"),
"appName": _SERIALIZER.url("app_name", app_name, "str"),
"bindingName": _SERIALIZER.url("binding_name", binding_name, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
def build_update_request(
resource_group_name: str, service_name: str, app_name: str, binding_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01-preview")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings/{bindingName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"serviceName": _SERIALIZER.url("service_name", service_name, "str"),
"appName": _SERIALIZER.url("app_name", app_name, "str"),
"bindingName": _SERIALIZER.url("binding_name", binding_name, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_request(
resource_group_name: str, service_name: str, app_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01-preview")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"serviceName": _SERIALIZER.url("service_name", service_name, "str"),
"appName": _SERIALIZER.url("app_name", app_name, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class BindingsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.appplatform.v2022_03_01_preview.AppPlatformManagementClient`'s
:attr:`bindings` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def get(
self, resource_group_name: str, service_name: str, app_name: str, binding_name: str, **kwargs: Any
) -> _models.BindingResource:
"""Get a Binding and its properties.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param service_name: The name of the Service resource. Required.
:type service_name: str
:param app_name: The name of the App resource. Required.
:type app_name: str
:param binding_name: The name of the Binding resource. Required.
:type binding_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BindingResource or the result of cls(response)
:rtype: ~azure.mgmt.appplatform.v2022_03_01_preview.models.BindingResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01-preview")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.BindingResource]
request = build_get_request(
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
binding_name=binding_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("BindingResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings/{bindingName}"} # type: ignore
def _create_or_update_initial(
self,
resource_group_name: str,
service_name: str,
app_name: str,
binding_name: str,
binding_resource: Union[_models.BindingResource, IO],
**kwargs: Any
) -> _models.BindingResource:
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01-preview")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.BindingResource]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(binding_resource, (IO, bytes)):
_content = binding_resource
else:
_json = self._serialize.body(binding_resource, "BindingResource")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
binding_name=binding_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("BindingResource", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("BindingResource", pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize("BindingResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings/{bindingName}"} # type: ignore
@overload
def begin_create_or_update(
self,
resource_group_name: str,
service_name: str,
app_name: str,
binding_name: str,
binding_resource: _models.BindingResource,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.BindingResource]:
"""Create a new Binding or update an exiting Binding.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param service_name: The name of the Service resource. Required.
:type service_name: str
:param app_name: The name of the App resource. Required.
:type app_name: str
:param binding_name: The name of the Binding resource. Required.
:type binding_name: str
:param binding_resource: Parameters for the create or update operation. Required.
:type binding_resource: ~azure.mgmt.appplatform.v2022_03_01_preview.models.BindingResource
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either BindingResource or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2022_03_01_preview.models.BindingResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_create_or_update(
self,
resource_group_name: str,
service_name: str,
app_name: str,
binding_name: str,
binding_resource: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.BindingResource]:
"""Create a new Binding or update an exiting Binding.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param service_name: The name of the Service resource. Required.
:type service_name: str
:param app_name: The name of the App resource. Required.
:type app_name: str
:param binding_name: The name of the Binding resource. Required.
:type binding_name: str
:param binding_resource: Parameters for the create or update operation. Required.
:type binding_resource: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either BindingResource or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2022_03_01_preview.models.BindingResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
service_name: str,
app_name: str,
binding_name: str,
binding_resource: Union[_models.BindingResource, IO],
**kwargs: Any
) -> LROPoller[_models.BindingResource]:
"""Create a new Binding or update an exiting Binding.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param service_name: The name of the Service resource. Required.
:type service_name: str
:param app_name: The name of the App resource. Required.
:type app_name: str
:param binding_name: The name of the Binding resource. Required.
:type binding_name: str
:param binding_resource: Parameters for the create or update operation. Is either a model type
         or an IO type. Required.
:type binding_resource: ~azure.mgmt.appplatform.v2022_03_01_preview.models.BindingResource or
IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either BindingResource or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2022_03_01_preview.models.BindingResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01-preview")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.BindingResource]
polling = kwargs.pop("polling", True) # type: Union[bool, PollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial( # type: ignore
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
binding_name=binding_name,
binding_resource=binding_resource,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("BindingResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(
PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs)
) # type: PollingMethod
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings/{bindingName}"} # type: ignore
def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, service_name: str, app_name: str, binding_name: str, **kwargs: Any
) -> None:
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01-preview")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_request(
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
binding_name=binding_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings/{bindingName}"} # type: ignore
@distributed_trace
def begin_delete(
self, resource_group_name: str, service_name: str, app_name: str, binding_name: str, **kwargs: Any
) -> LROPoller[None]:
"""Operation to delete a Binding.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param service_name: The name of the Service resource. Required.
:type service_name: str
:param app_name: The name of the App resource. Required.
:type app_name: str
:param binding_name: The name of the Binding resource. Required.
:type binding_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01-preview")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
polling = kwargs.pop("polling", True) # type: Union[bool, PollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
binding_name=binding_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method = cast(
PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs)
) # type: PollingMethod
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings/{bindingName}"} # type: ignore
def _update_initial(
self,
resource_group_name: str,
service_name: str,
app_name: str,
binding_name: str,
binding_resource: Union[_models.BindingResource, IO],
**kwargs: Any
) -> _models.BindingResource:
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01-preview")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.BindingResource]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(binding_resource, (IO, bytes)):
_content = binding_resource
else:
_json = self._serialize.body(binding_resource, "BindingResource")
request = build_update_request(
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
binding_name=binding_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("BindingResource", pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize("BindingResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings/{bindingName}"} # type: ignore
@overload
def begin_update(
self,
resource_group_name: str,
service_name: str,
app_name: str,
binding_name: str,
binding_resource: _models.BindingResource,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.BindingResource]:
"""Operation to update an exiting Binding.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param service_name: The name of the Service resource. Required.
:type service_name: str
:param app_name: The name of the App resource. Required.
:type app_name: str
:param binding_name: The name of the Binding resource. Required.
:type binding_name: str
:param binding_resource: Parameters for the update operation. Required.
:type binding_resource: ~azure.mgmt.appplatform.v2022_03_01_preview.models.BindingResource
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either BindingResource or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2022_03_01_preview.models.BindingResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_update(
self,
resource_group_name: str,
service_name: str,
app_name: str,
binding_name: str,
binding_resource: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.BindingResource]:
"""Operation to update an exiting Binding.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param service_name: The name of the Service resource. Required.
:type service_name: str
:param app_name: The name of the App resource. Required.
:type app_name: str
:param binding_name: The name of the Binding resource. Required.
:type binding_name: str
:param binding_resource: Parameters for the update operation. Required.
:type binding_resource: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either BindingResource or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2022_03_01_preview.models.BindingResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_update(
self,
resource_group_name: str,
service_name: str,
app_name: str,
binding_name: str,
binding_resource: Union[_models.BindingResource, IO],
**kwargs: Any
) -> LROPoller[_models.BindingResource]:
"""Operation to update an exiting Binding.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param service_name: The name of the Service resource. Required.
:type service_name: str
:param app_name: The name of the App resource. Required.
:type app_name: str
:param binding_name: The name of the Binding resource. Required.
:type binding_name: str
        :param binding_resource: Parameters for the update operation. Is either a model type or an IO
type. Required.
:type binding_resource: ~azure.mgmt.appplatform.v2022_03_01_preview.models.BindingResource or
IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either BindingResource or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2022_03_01_preview.models.BindingResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01-preview")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.BindingResource]
polling = kwargs.pop("polling", True) # type: Union[bool, PollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial( # type: ignore
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
binding_name=binding_name,
binding_resource=binding_resource,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("BindingResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(
PollingMethod, ARMPolling(lro_delay, lro_options={"final-state-via": "azure-async-operation"}, **kwargs)
) # type: PollingMethod
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings/{bindingName}"} # type: ignore
@distributed_trace
def list(
self, resource_group_name: str, service_name: str, app_name: str, **kwargs: Any
) -> Iterable["_models.BindingResource"]:
"""Handles requests to list all resources in an App.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal. Required.
:type resource_group_name: str
:param service_name: The name of the Service resource. Required.
:type service_name: str
:param app_name: The name of the App resource. Required.
:type app_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either BindingResource or the result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.appplatform.v2022_03_01_preview.models.BindingResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-03-01-preview")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.BindingResourceCollection]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urlparse(next_link)
_next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("BindingResourceCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings"} # type: ignore
| [
"[email protected]"
] | |
2cd03a9d0b16f337862d0fd2c74a686391d89077 | 71875792ea0e1520e442b37f5296c90e37734581 | /Python/0200. Number of Islands.py | 0ec08708e2cfcdb2ea99ee06d1076437d79af32c | [
"MIT"
] | permissive | kangqiwang/myLeetcode | 401e806fbf43e8c3f2c70a720edfdb4c799ea897 | 40f6da994e443cd027752bd6f3ab33eb3556a220 | refs/heads/master | 2023-01-22T15:57:51.257811 | 2023-01-07T21:33:44 | 2023-01-07T21:33:44 | 190,908,551 | 0 | 0 | null | 2022-04-19T10:18:45 | 2019-06-08T16:11:35 | Python | UTF-8 | Python | false | false | 1,616 | py | '''
Given an m x n 2D binary grid grid which represents a map of '1's (land) and '0's (water), return the number of islands.
An island is surrounded by water and is formed by connecting adjacent lands horizontally or vertically. You may assume all four edges of the grid are all surrounded by water.
Example 1:
Input: grid = [
["1","1","1","1","0"],
["1","1","0","1","0"],
["1","1","0","0","0"],
["0","0","0","0","0"]
]
Output: 1
Example 2:
Input: grid = [
["1","1","0","0","0"],
["1","1","0","0","0"],
["0","0","1","0","0"],
["0","0","0","1","1"]
]
Output: 3
'''
from typing import List
class Solution:
def numIslands(self, grid: List[List[str]]) -> int:
count = 0
for i in range(len(grid)):
for j in range(len(grid[0])):
if grid[i][j] == '1':
self.dfs(grid,i,j)
count += 1
return count
    def dfs(self, grid, i, j):
        # Flood fill: sink the current cell, then recurse into the four
        # neighbouring cells that are still land.
        grid[i][j] = '0'
        for dr, dc in (1, 0), (-1, 0), (0, -1), (0, 1):
            r = i + dr
            c = j + dc
            if 0 <= r < len(grid) and 0 <= c < len(grid[0]) and grid[r][c] == '1':
                self.dfs(grid, r, c)
    def dfs2(self, grid, i, j):
        # Alternative flood fill: bounds-check on entry instead of before the
        # recursive call, and mark visited land with '#'.
        if i < 0 or j < 0 or i >= len(grid) or j >= len(grid[0]) or grid[i][j] != '1':
            return
        grid[i][j] = '#'
        self.dfs2(grid, i + 1, j)
        self.dfs2(grid, i - 1, j)
        self.dfs2(grid, i, j + 1)
        self.dfs2(grid, i, j - 1)
print(Solution().numIslands([
["1","1","1","1","0"],
["1","1","0","1","0"],
["1","1","0","0","0"],
["0","0","0","0","0"]
]
))
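# Iterative BFS variant (added for illustration): same O(M*N) time as the
# recursive flood fill above, but avoids deep recursion on very large grids.
from collections import deque

def num_islands_bfs(grid: List[List[str]]) -> int:
    if not grid:
        return 0
    rows, cols = len(grid), len(grid[0])
    count = 0
    for i in range(rows):
        for j in range(cols):
            if grid[i][j] != '1':
                continue
            count += 1
            grid[i][j] = '0'
            queue = deque([(i, j)])
            while queue:
                r, c = queue.popleft()
                for dr, dc in (1, 0), (-1, 0), (0, -1), (0, 1):
                    nr, nc = r + dr, c + dc
                    if 0 <= nr < rows and 0 <= nc < cols and grid[nr][nc] == '1':
                        grid[nr][nc] = '0'  # sink when enqueued to avoid duplicates
                        queue.append((nr, nc))
    return count

print(num_islands_bfs([
    ["1", "1", "0", "0", "0"],
    ["1", "1", "0", "0", "0"],
    ["0", "0", "1", "0", "0"],
    ["0", "0", "0", "1", "1"]
]))  # expected: 3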
| [
"[email protected]"
] | |
0bcac2c7b50e1326ff493258771670b5fc8f0a84 | 6f05f7d5a67b6bb87956a22b988067ec772ba966 | /data/train/python/5a641fc39ee81f4a0e98c3bc90fd4dbd69b0a45abase.py | 5a641fc39ee81f4a0e98c3bc90fd4dbd69b0a45a | [
"MIT"
] | permissive | harshp8l/deep-learning-lang-detection | 93b6d24a38081597c610ecf9b1f3b92c7d669be5 | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | refs/heads/master | 2020-04-07T18:07:00.697994 | 2018-11-29T23:21:23 | 2018-11-29T23:21:23 | 158,597,498 | 0 | 0 | MIT | 2018-11-21T19:36:42 | 2018-11-21T19:36:41 | null | UTF-8 | Python | false | false | 892 | py | def get_class( kls ):
'''Get a class by its name'''
parts = kls.split('.')
module = ".".join(parts[:-1])
m = __import__( module )
for comp in parts[1:]:
m = getattr(m, comp)
return m
def create_view_from_controller(controller):
'''Create an instance of a view object for the specified controller'''
return get_class(get_view_class_name(controller))()
def get_view_class_name(controller):
'''Generate the view class name from the controller.'''
return 'views.%(section_name)s.%(view_name)s' % {
'section_name': ".".join(controller.__class__.__module__.split('.')[1:]),
'view_name': controller.__class__.__name__
}
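# Quick illustration (added): get_class resolves any importable dotted path,
# e.g. a standard-library class.
#
#     ordered_dict_cls = get_class('collections.OrderedDict')
#     d = ordered_dict_cls()  # equivalent to collections.OrderedDict()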
class Controller(object):
'''Base Controller class. Always subclass this.'''
def __init__(self):
self.view = create_view_from_controller(self) # create the view by convention | [
"[email protected]"
] | |
e9d36d371082deddb241956d3907c7623414b0f9 | 60e7738d90ea7151a790a73285382b0c77799262 | /p3/Lib/site-packages/pandas/tests/io/conftest.py | 828d5d0ccd3c6ee93b6f04c8d67f98ca46907787 | [
"MIT"
] | permissive | fpark7/Native2Native | 251b3c08af16bbbc4d077840f66aea7acdacc002 | 1bc3390770ddafbba2e2779ba91998643df6d9ec | refs/heads/master | 2021-04-18T21:27:41.378371 | 2018-03-27T02:47:51 | 2018-03-27T02:47:51 | 126,620,375 | 1 | 2 | MIT | 2021-03-19T22:50:00 | 2018-03-24T16:52:28 | Python | UTF-8 | Python | false | false | 1,827 | py | import os
import moto
import pytest
from pandas.io.parsers import read_table
HERE = os.path.dirname(__file__)
@pytest.fixture(scope='module')
def tips_file():
"""Path to the tips dataset"""
return os.path.join(HERE, 'parser', 'data', 'tips.csv')
@pytest.fixture(scope='module')
def jsonl_file():
"""Path a JSONL dataset"""
return os.path.join(HERE, 'parser', 'data', 'items.jsonl')
@pytest.fixture(scope='module')
def salaries_table():
"""DataFrame with the salaries dataset"""
path = os.path.join(HERE, 'parser', 'data', 'salaries.csv')
return read_table(path)
@pytest.fixture(scope='module')
def s3_resource(tips_file, jsonl_file):
"""Fixture for mocking S3 interaction.
The primary bucket name is "pandas-test". The following datasets
are loaded.
- tips.csv
- tips.csv.gz
- tips.csv.bz2
- items.jsonl
A private bucket "cant_get_it" is also created. The boto3 s3 resource
is yielded by the fixture.
"""
pytest.importorskip('s3fs')
moto.mock_s3().start()
test_s3_files = [
('tips.csv', tips_file),
('tips.csv.gz', tips_file + '.gz'),
('tips.csv.bz2', tips_file + '.bz2'),
('items.jsonl', jsonl_file),
]
def add_tips_files(bucket_name):
for s3_key, file_name in test_s3_files:
with open(file_name, 'rb') as f:
conn.Bucket(bucket_name).put_object(
Key=s3_key,
Body=f)
boto3 = pytest.importorskip('boto3')
# see gh-16135
bucket = 'pandas-test'
conn = boto3.resource("s3", region_name="us-east-1")
conn.create_bucket(Bucket=bucket)
add_tips_files(bucket)
conn.create_bucket(Bucket='cant_get_it', ACL='private')
add_tips_files('cant_get_it')
yield conn
moto.mock_s3().stop()
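# Illustrative fixture use (added; not part of the original conftest). pytest
# injects the fixture by parameter name:
#
#     def test_tips_object_exists(s3_resource):
#         obj = s3_resource.Object('pandas-test', 'tips.csv').get()
#         assert obj['ContentLength'] > 0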
| [
"[email protected]"
] | |
122ca2414bd49a9409a20b2b9f066c334629fc37 | ef875440cf82b6eed61bf6d9d0c6acfae5f90ef4 | /Assument/4.py | 629e385782ecd25815678aa8b3abb8fa849a0121 | [] | no_license | Nitesh101/test | 5ab9b1e23167f8496d90d15484d57328b7f1430e | 4c413b3a056a633c5bcf93ae21c999ff67eeaa95 | refs/heads/master | 2020-03-29T09:04:32.723099 | 2018-09-21T09:33:41 | 2018-09-21T09:33:41 | 149,740,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | val = ([[1,1,1],[2,2,2],[3,3,3]])
# Transpose the 3x3 nested list: collect each column into its own list.
lis = []
lis2 = []
lis3 = []
for i in val:
    lis.append(i[0])
    lis2.append(i[1])
    lis3.append(i[2])
val = lis, lis2, lis3
print(list(val))
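# Added note (equivalent idiom): the same transpose is a one-liner with zip:
#     list(zip(*[[1, 1, 1], [2, 2, 2], [3, 3, 3]]))
# which yields [(1, 2, 3), (1, 2, 3), (1, 2, 3)].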
| [
"[email protected]"
] | |
eb7db7e616591052961f8a747a23784258e3bead | f90dee93b042b689f44ebb53c77585c9174b937a | /btrack/optimise/lineage.py | 542696c43aeb1be8affd8559934066979cdbfbc3 | [
"MIT"
] | permissive | serenidpity/BayesianTracker | a2e654986d6b13e4187e1092b9d1b9d6d0d968d3 | e02a24140e14c79c91222c3c5e91f8658e7e68d3 | refs/heads/master | 2021-03-28T04:03:08.414405 | 2020-02-19T09:44:27 | 2020-02-19T09:44:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,958 | py | #!/usr/bin/env python
#-------------------------------------------------------------------------------
# Name: BayesianTracker
# Purpose: A multi object tracking library, specifically used to reconstruct
# tracks in crowded fields. Here we use a probabilistic network of
# information to perform the trajectory linking. This method uses
# positional and visual information for track linking.
#
# Authors: Alan R. Lowe (arl) [email protected]
#
# License: See LICENSE.md
#
# Created: 14/08/2014
#-------------------------------------------------------------------------------
__author__ = "Alan R. Lowe"
__email__ = "[email protected]"
import os
import json
from collections import OrderedDict
from btrack import btypes
from btrack import dataio
class LineageTreeNode(object):
""" LineageTreeNode
Node object to store tree structure and underlying track data
Args:
track: the Track object
depth: depth of the node in the binary tree (should be > parent)
root: is this the root node?
Properties:
left: pointer to left node
right: pointer to right node
leaf: returns whether this is also a leaf node (no children)
children: returns the left and right nodes together
ID: returns the track ID
start: start time
end: end time
Notes:
"""
def __init__(self,
track=None,
root=False,
depth=0):
assert(isinstance(root, bool))
assert(depth>=0)
self.root = root
self.left = None
self.right = None
self.track = track
self.depth = depth
@property
def leaf(self):
return not all([self.left, self.right])
@property
def children(self):
""" return references to the children (if any) """
if self.leaf: return []
return [self.left, self.right]
@property
def ID(self): return self.track.ID
@property
def start(self): return self.track.t[0]
@property
def end(self): return self.track.t[-1]
def to_dict(self):
""" convert the whole tree (from this node onward) to a dictionary """
return tree_to_dict(self)
@property
def filename(self): return self.track.filename
def tree_to_dict(root):
""" tree_to_dict
Convert a tree to a JSON compatible dictionary. Traverses the tree and
returns a dictionary structure which can be output as a JSON file.
Recursive implementation, hopefully there are no loops!
The JSON representation should look like this:
{
"name": "1",
"children": [
{
"name": "2"
},
{
"name": "3"
}
]
}
Args:
root: a root LineageTreeNode
Returns:
a dictionary representation of the tree.
"""
tree = []
assert(isinstance(root, LineageTreeNode))
tree.append(("name", str(int(root.ID))))
tree.append(("start", root.start))
tree.append(("end", root.end))
if root.children:
tree.append(("children", [tree_to_dict(root.left),tree_to_dict(root.right)]))
# return tree
return OrderedDict(tree)
def export_tree_to_json(tree, filename):
""" export a tree to JSON format for visualisation """
#TODO(arl): proper type checking here
assert(isinstance(tree, dict))
assert(isinstance(filename, str))
with open(filename, 'w') as json_file:
json.dump(tree, json_file, indent=2, separators=(',', ': '))
def create_and_export_trees_to_json(export_dir,
cell_type,
ignore_single_tracks=True):
""" create trees from tracks and export a single tree file
Args:
export_dir: the directory with the json track files, also where trees
will be saved
cell_type: a cell type, e.g. 'GFP'
ignore_single_tracks: ignore non-trees when exporting
"""
# set the correct trees filename
trees_file = os.path.join(export_dir, "trees_{}.json".format(cell_type))
lineage_tree = LineageTree.from_json(export_dir, cell_type)
lineage_tree.create()
json_trees = [tree_to_dict(t) for t in lineage_tree.trees]
if ignore_single_tracks:
json_trees = [t for t in json_trees if 'children' in list(t.keys())]
# now write out the trees
with open(trees_file, 'w') as json_file:
json.dump(json_trees, json_file, indent=2, separators=(',', ': '))
def linearise_tree(root_node):
""" Linearise a tree, i.e. return a list of track objects in the
    tree, but lose the hierarchy
Essentially the inverse of tree calculation. Useful for plotting.
"""
assert(isinstance(root_node, LineageTreeNode))
queue = [root_node]
linear = []
while queue:
node = queue.pop(0)
linear.append(node)
if node.children:
queue.append(node.left)
queue.append(node.right)
return linear
class LineageTree(object):
""" LineageTree
Build a lineage tree from track objects.
Args:
tracks: a list of Track objects, typically imported from a json/xml file
Methods:
get_track_by_ID: return the track object with the corresponding ID
create: create the lineage trees by performing a BFS
plot: plot the tree/trees
Notes:
Need to update plotting and return other stats from the trees
"""
def __init__(self, tracks):
assert(isinstance(tracks, list))
if not all([isinstance(trk, btypes.Tracklet) for trk in tracks]):
raise TypeError('Tracks should be of type Track')
# sort the tracks by the starting frame
# self.tracks = sorted(tracks, key=lambda trk:trk.t[0], reverse=False)
tracks.sort(key=lambda trk:trk.t[0], reverse=False)
self.tracks = tracks
def get_track_by_ID(self, ID):
""" return the track object with the corresponding ID """
return [t for t in self.tracks if t.ID==ID][0]
def create(self, update_tracks=True):
""" build the lineage tree """
used = []
self.trees = []
# iterate over the tracks and add them into the growing binary trees
for trk in self.tracks:
if trk not in used:
# TODO(arl): confirm that this is a root node, i.e. the parent
# ID should be the same as the track ID or None
if trk.ID != trk.parent and trk.parent not in (0, None):
print("Error with trk {}".format(trk.ID))
print(trk.ID, trk.parent)
root = LineageTreeNode(track=trk, root=True)
used.append(trk)
if trk.children:
# follow the tree here
queue = [root]
while len(queue) > 0:
q = queue.pop(0)
children = q.track.children
if children:
# make the left node, then the right
left = self.get_track_by_ID(children[0])
right = self.get_track_by_ID(children[1])
# set the children of the current node
d = q.depth + 1 # next level from parent
q.left = LineageTreeNode(track=left, depth=d)
q.right = LineageTreeNode(track=right, depth=d)
# append the left and right children to the queue
queue.append(q.left)
queue.append(q.right)
# flag as used, do not need to revisit
used.append(left)
used.append(right)
if update_tracks:
left.root = root.ID
right.root = root.ID
# append the root node
self.trees.append(root)
return self.trees
def plot(self):
""" plot the trees """
        # NOTE: LineageTreePlotter is not imported or defined in this module;
        # it must be supplied by the plotting utilities elsewhere in btrack.
        plotter = LineageTreePlotter()
for t in self.trees:
plotter.plot([t])
@property
def linear_trees(self):
""" return each tree as a linear list of tracks """
        linear_trees = [linearise_tree(t) for t in self.trees]
return linear_trees
@staticmethod
def from_xml(filename, cell_type=None):
""" create a lineage tree from an XML file """
tracks = dataio.read_XML(filename, cell_type=cell_type)
return LineageTree(tracks)
@staticmethod
def from_json(tracks_dir, cell_type=None):
""" create a lineage tree from an XML file """
tracks = dataio.read_JSON(tracks_dir, cell_type)
return LineageTree(tracks)
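# Illustrative usage (added; the directory and cell type are placeholder
# assumptions):
#
#     builder = LineageTree.from_json('/path/to/tracks', 'GFP')
#     roots = builder.create()
#     for i, root in enumerate(roots):
#         export_tree_to_json(root.to_dict(), 'tree_{}.json'.format(i))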
if __name__ == "__main__":
pass
| [
"[email protected]"
] | |
86c4d1abbd5ea480d106fd2383b2979cd8424d29 | 9d8f241e4e851f0fe71655d2b698c3d40feab98a | /examples/naive/env_mcarrier_maddpg.py | e0e28ddaff736f3a84abf5d2cbb3be2b3c3c7a3f | [
"MIT"
] | permissive | StepNeverStop/machin | c30c15bbcc38dde06eac57e7ff562753b8927f6b | 3caecb6ea7b02a9687281e2270577a6ed92c5dd8 | refs/heads/master | 2022-11-24T14:43:20.041102 | 2020-07-21T13:08:19 | 2020-07-21T13:08:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 809 | py | from .env_walker_ddpg import t, nn, Actor
class Critic(nn.Module):
def __init__(self, agent_num, state_dim, action_dim):
super(Critic, self).__init__()
self.agent_num = agent_num
self.state_dim = state_dim
self.action_dim = action_dim
st_dim = state_dim * agent_num
act_dim = action_dim * agent_num
self.fc1 = nn.Linear(st_dim + act_dim, 1024)
self.fc2 = nn.Linear(1024, 512)
self.fc3 = nn.Linear(512, 1)
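    # Shape sketch (added; the sizes below are made-up assumptions):
    #     critic = Critic(agent_num=3, state_dim=28, action_dim=4)
    #     states = t.zeros(8, 3, 28)    # [batch, agent_num, state_dim]
    #     actions = t.zeros(8, 3, 4)    # [batch, agent_num, action_dim]
    #     q = critic(states, actions)   # -> [8, 1], one joint Q-value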
    # all_states: [batch, agent_num, state_dim]; all_actions: [batch,
    # agent_num, action_dim]. Both are flattened and concatenated so the
    # critic sees the joint observation-action of every agent (a centralized
    # MADDPG-style critic).
def forward(self, all_states, all_actions):
all_actions = t.flatten(all_actions, 1, -1)
all_states = t.flatten(all_states, 1, -1)
q = t.relu(self.fc1(t.cat((all_states, all_actions), dim=1)))
q = t.relu(self.fc2(q))
q = self.fc3(q)
return q | [
"[email protected]"
] | |
1b12c35220e59e832d67c3ed0c724b894ec10460 | a91b397711e2c5334f58ba1d466cf6b58dc694f2 | /code_python/lib/null/models.py | a5574f86629b367b095aaa709afec768f8c646ab | [] | no_license | aleksejs-fomins/mesoscopic-functional-connectivity | 96f68a379ea8c2abd60441d70e18227e4c3e05cb | e17dd3367cdebc3e43eb981b4868c3a148038af8 | refs/heads/master | 2022-04-29T18:06:59.667225 | 2022-04-22T17:06:47 | 2022-04-22T17:06:47 | 199,410,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,175 | py | import numpy as np
def cycle(arr, nStep):
return np.hstack([arr[-nStep:], arr[:-nStep]])
def mix(x, y, frac):
return (1 - frac) * x + frac * y
def conv_exp(data, dt, tau):
nTexp = int(5*tau / dt)
t = dt * np.arange(nTexp)
exp = np.exp(-t/tau)
exp /= np.sum(exp)
nTData = data.shape[0]
return np.convolve(data, exp)[:nTData]
def two_node_system(nTime, lags=None, trgFracs=None, noiseFrac=0.1, crossXY=0, crossYX=0, convDT=None, convTau=None):
x = np.random.normal(0, 1, nTime)
y = np.random.normal(0, 1, nTime)
# Add lagged coupling
if lags is not None:
y = (1 - np.sum(trgFracs)) * y + np.sum([frac * cycle(x, lag) for frac, lag in zip(trgFracs, lags)], axis=0)
# Add convolution
if convDT is not None:
x = conv_exp(x, convDT, convTau)
y = conv_exp(y, convDT, convTau)
    # Add cross-talk. NOTE: with symmetric mixing the variables fully swap when crossXY = 1
xMixed = mix(x, y, crossXY)
yMixed = mix(y, x, crossYX)
# Add observation noise
xMixed = mix(xMixed, np.random.normal(0, 1, nTime), noiseFrac)
yMixed = mix(yMixed, np.random.normal(0, 1, nTime), noiseFrac)
return np.array([xMixed, yMixed])
def three_node_system(nTime, lags=None, trgFracs=None, noiseFrac=0.1, crossZX=0, convDT=None, convTau=None):
x = np.random.normal(0, 1, nTime)
y = np.random.normal(0, 1, nTime)
z = np.random.normal(0, 1, nTime)
# Add lagged coupling
if lags is not None:
y = (1 - np.sum(trgFracs)) * y + np.sum([frac * cycle(x, lag) for frac, lag in zip(trgFracs, lags)], axis=0)
# Add convolution
if convDT is not None:
x = conv_exp(x, convDT, convTau)
y = conv_exp(y, convDT, convTau)
z = conv_exp(z, convDT, convTau)
    # Add cross-talk. NOTE: with symmetric mixing z is fully replaced by x when crossZX = 1
zMixed = mix(z, x, crossZX)
# Add observation noise
xMixed = mix(x, np.random.normal(0, 1, nTime), noiseFrac)
yMixed = mix(y, np.random.normal(0, 1, nTime), noiseFrac)
zMixed = mix(zMixed, np.random.normal(0, 1, nTime), noiseFrac)
return np.array([xMixed, yMixed, zMixed])
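# Illustrative usage (added; all parameter values are assumptions): y is
# driven by x at lags 1 and 5, with mild cross-talk and observation noise.
if __name__ == "__main__":
    data = two_node_system(1000, lags=[1, 5], trgFracs=[0.4, 0.3],
                           noiseFrac=0.1, crossXY=0.05, crossYX=0.05)
    print(data.shape)  # -> (2, 1000)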
| [
"[email protected]"
] | |
fa10a6c2cd264d81b7fc334f6127a0a158f47c5f | 4148260054c2cf4605dacb8bdef3605c82eca470 | /temboo/Library/Foursquare/Checkins/DeleteComment.py | 9e5e663d00f40a17986521496cd444730514c959 | [] | no_license | wimsy/actuarize-web | 0f23d5f00afe3d36d430621cdb497d2e64998416 | 5f43af3019da6fb08cafeec9ff0a89df5196b864 | refs/heads/master | 2021-03-12T19:38:21.887681 | 2012-12-19T01:13:50 | 2012-12-19T01:13:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,128 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# DeleteComment
# Removes a comment from a specified check-in.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
class DeleteComment(Choreography):
"""
Create a new instance of the DeleteComment Choreography. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
def __init__(self, temboo_session):
Choreography.__init__(self, temboo_session, '/Library/Foursquare/Checkins/DeleteComment')
def new_input_set(self):
return DeleteCommentInputSet()
def _make_result_set(self, result, path):
return DeleteCommentResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return DeleteCommentChoreographyExecution(session, exec_id, path)
"""
An InputSet with methods appropriate for specifying the inputs to the DeleteComment
choreography. The InputSet object is used to specify input parameters when executing this choreo.
"""
class DeleteCommentInputSet(InputSet):
"""
Set the value of the CheckinID input for this choreography. ((required, string) The ID of the check-in associated with the comment you want to remove.)
"""
def set_CheckinID(self, value):
InputSet._set_input(self, 'CheckinID', value)
"""
Set the value of the CommentID input for this choreography. ((required, string) The id of the comment to remove.)
"""
def set_CommentID(self, value):
InputSet._set_input(self, 'CommentID', value)
"""
Set the value of the OauthToken input for this choreography. ((required, string) The FourSquare API Oauth token string.)
"""
def set_OauthToken(self, value):
InputSet._set_input(self, 'OauthToken', value)
"""
Set the value of the ResponseFormat input for this choreography. ((optional, string) The format that response should be in. Can be set to xml or json. Defaults to json.)
"""
def set_ResponseFormat(self, value):
InputSet._set_input(self, 'ResponseFormat', value)
"""
A ResultSet with methods tailored to the values returned by the DeleteComment choreography.
The ResultSet object is used to retrieve the results of a choreography execution.
"""
class DeleteCommentResultSet(ResultSet):
"""
Retrieve the value for the "Response" output from this choreography execution. (The response from Foursquare. Corresponds to the ResponseFormat input. Defaults to JSON.)
"""
def get_Response(self):
return self._output.get('Response', None)
class DeleteCommentChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return DeleteCommentResultSet(response, path)
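# --- usage sketch (added for illustration, not generated by Temboo): all
# account, key, and id values below are placeholders, and the
# execute_with_results entry point is assumed from the standard Temboo SDK.
if __name__ == '__main__':
    from temboo.core.session import TembooSession
    session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
    choreo = DeleteComment(session)
    inputs = choreo.new_input_set()
    inputs.set_CheckinID('CHECKIN_ID')
    inputs.set_CommentID('COMMENT_ID')
    inputs.set_OauthToken('OAUTH_TOKEN')
    results = choreo.execute_with_results(inputs)
    print(results.get_Response())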
| [
"[email protected]"
] | |
1f8a4937050b87f6ab02f889118465b3ecda37fb | faa0ce2a95da958be3bfb171cdff29eeb43c3eb6 | /py-exercises/JulieTestModule/characters/base.py | dafaa1a45e64b9d51460a9bd19e5725b9ce466a8 | [] | no_license | julianapeace/digitalcrafts-exercises | 98fe4e20420c47cf9d92d16c45ac60dc35a49a6a | 98e6680138d55c5d093164a47da53e1ddb6d064c | refs/heads/master | 2021-08-30T04:17:09.997205 | 2017-12-16T00:22:22 | 2017-12-16T00:22:22 | 103,176,043 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,026 | py | import random
import time
class Character:
def __init__(self, name, health, power, armor, evade, coincount):
self.name = name
self.health = health
self.power = power
self.armor = armor
self.evade = evade
self.coincount = coincount
def alive(self):
return self.health > 0
def print_status(self):
print("{} has {} health and {} power.".format(self.name, self.health, self.power))
def attack(self, enemy):
enemy.health -= self.power
print("{} does {} damage to the {}.".format(self.name, self.power, enemy.name))
if self.alive() == False:
print("The {} is dead.".format(self.name))
if enemy.alive() == False:
print("The {} is dead.".format(enemy.name))
time.sleep(1.5)
def heal(self):
roll_dice = random.random()
if self.alive() and roll_dice < 0.2:
self.health += 2
print("{} gained 2 health.".format(self.name))
time.sleep(1.0)
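# --- usage sketch (added for illustration, not part of the original file):
# a minimal combat round, using this module's constructor order
# (name, health, power, armor, evade, coincount).
if __name__ == '__main__':
    hero = Character('hero', 10, 3, 0, 0, 0)
    goblin = Character('goblin', 6, 2, 0, 0, 0)
    hero.print_status()
    hero.attack(goblin)
    goblin.heal()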
| [
"[email protected]"
] | |
7ad3646b0c2f153e45d005befda60c71338878c3 | 872f78956f18b124e96b8c6851c7817c2e9147dc | /colcon_core/__init__.py | 3b52c58444fb1b14c9a1ea166cd845acaa848f99 | [
"Apache-2.0"
] | permissive | yossioo/colcon-core | 01ad8f1840c0e677701152961687f9ddf3cd3e2c | 8effab0f840063881fe9e0355a500e025d73b9c2 | refs/heads/master | 2020-06-09T15:41:12.244298 | 2019-06-28T14:47:11 | 2019-06-28T14:47:11 | 193,461,562 | 0 | 0 | Apache-2.0 | 2019-06-24T08:04:31 | 2019-06-24T08:04:30 | null | UTF-8 | Python | false | false | 107 | py | # Copyright 2016-2018 Dirk Thomas
# Licensed under the Apache License, Version 2.0
__version__ = '0.3.22'
| [
"[email protected]"
] | |
97a21525f5b4a28d110146eb0d697b52c9cc2677 | a82dfb61b17fa66b9c75fe871401cff77aa77f56 | /utils/bng_species_reader_py/bng_species_reader_example.py | 5a4dbc089b21feb77a4d7cb8784ed5b265f9810a | [
"MIT"
] | permissive | mcellteam/mcell | 49ca84048a091de8933adccc083d31b7bcb1529e | 3920aec22c55013b78f7d6483b81f70a0d564d22 | refs/heads/master | 2022-12-23T15:01:51.931150 | 2021-09-29T16:49:14 | 2021-09-29T16:49:14 | 10,253,341 | 29 | 12 | NOASSERTION | 2021-07-08T01:56:40 | 2013-05-23T20:59:54 | C++ | UTF-8 | Python | false | false | 5,253 | py | """
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to [http://unlicense.org]
"""
import sys
import os
import pandas as pd
MCELL_PATH = os.environ.get('MCELL_PATH', '')
if MCELL_PATH:
sys.path.append(os.path.join(MCELL_PATH, 'lib'))
else:
print("Error: variable MCELL_PATH that is used to find the mcell library was not set.")
sys.exit(1)
import mcell as m
# utility module to load an ASCII viz_output .dat file
import species_reader
def get_bound_em(complex, src_em, comp_name):
# assuming that there is a single component in src_em
# (source elementary molecule) with comp_name that is bound
bond_index = -1
for c in src_em.components:
if c.component_type.name == comp_name:
# not allowed cases, we need the number
assert c.bond != m.BOND_UNBOUND # no bond
assert c.bond != m.BOND_ANY # !?
assert c.bond != m.BOND_BOUND # !+
bond_index = c.bond
assert bond_index != -1, "Did not find " + comp_name + " in " + src_em.to_bngl_str()
# find this bond in the complex (might be slow)
for em in complex.elementary_molecules:
if em is src_em:
continue
for c in em.components:
if c.bond == bond_index:
return em
assert False, "Did not find paired bond " + str(bond_index) + " in " + complex.to_bngl_str()
def convert_species_file(file_name):
# read the .species file and parse complexes to the internal MCell
# representation
complex_counts = species_reader.read_species_file(file_name)
# prepare the component type that we will append to the CaMKII molecules
# the new component means 'upper' and has a boolean state
ct_u = m.ComponentType('u', ['0','1'])
# process the data by adding component 'u' to the CaMKII elementary molecules
for (complex, count) in complex_counts:
# if this a CaMKII dodecamer?
camkiis = []
for em in complex.elementary_molecules:
em_name = em.elementary_molecule_type.name
if em_name == 'CaMKII':
camkiis.append(em)
if len(camkiis) != 12:
# output it directly
print(complex.to_bngl_str() + " " + str(count))
continue
# ok, we have the holoenzyme, how can we figure out which
# of the CaMKIIs belong to the upper and the lower
# ring?
# let's say that the first one is in the upper ring,
# go along the 'r'
upper_first = camkiis[0]
upper_ring = [ upper_first ]
curr = upper_first
for i in range(1, 6):
curr = get_bound_em(complex, curr, 'r')
upper_ring.append(curr)
assert upper_first is get_bound_em(complex, curr, 'r'), "A ring must be formed"
# then go down along 'c' and go again along 'r' to get molecules of the lower ring
lower_first = get_bound_em(complex, camkiis[0], 'c')
lower_ring = [ lower_first ]
curr = lower_first
for i in range(1, 6):
curr = get_bound_em(complex, curr, 'r')
lower_ring.append(curr)
assert lower_first is get_bound_em(complex, curr, 'r'), "A ring must be formed"
# now the modifications - add components by instantiating the component type 'u'
# the complexes were parsed so that each complex holds its own instance of the
# CaMKII elementary molecule type, so it is safe to change just one of them
upper_first.elementary_molecule_type.components.append(ct_u)
for em in upper_ring:
em.components.append(ct_u.inst('1'))
for em in lower_ring:
em.components.append(ct_u.inst('0'))
print(complex.to_bngl_str() + " " + str(count))
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Expected .species file name as argument")
sys.exit(1)
convert_species_file(sys.argv[1])
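# --- usage note (added for illustration): invoked from the shell with a
# .species file produced by an MCell/BioNetGen run; the file name below is
# hypothetical and MCELL_PATH must point at an MCell installation, e.g.
#   MCELL_PATH=/opt/mcell python bng_species_reader_example.py viz.species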
| [
"[email protected]"
] | |
5f75eeaac0ec27d418823af144bf4f4b7adfbfde | d326cd8d4ca98e89b32e6a6bf6ecb26310cebdc1 | /rosalind/algorithmic/heights/bins/main.py | e97c45fba0ff73e7675f4be83c03b4383f888eca | [] | no_license | dswisher/rosalind | d6af5195cdbe03adb5a19ed60fcbf8c05beac784 | 4519740350e47202f7a45ce70e434f7ee15c6afc | refs/heads/master | 2021-08-09T02:58:17.131164 | 2017-11-12T01:26:26 | 2017-11-12T01:26:26 | 100,122,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 643 | py |
import sys
from rosalind.common import util
if len(sys.argv) != 2:
print "usage: python bins.py <filename>"
sys.exit(1)
with open(util.find_file(sys.argv[1]), "r") as fp:
fp.readline()
fp.readline()
A = map(int, fp.readline().split())
B = map(int, fp.readline().split())
def binary_search(A, x):
l = 0
r = len(A) - 1
while l <= r:
mid = (l + r) / 2
if A[mid] == x:
return mid + 1
if A[mid] < x:
l = mid + 1
else:
r = mid - 1
return -1
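# --- usage sketch (added for illustration): the function returns a 1-based
# index into the sorted list, or -1 when the value is absent.
assert binary_search([1, 3, 5, 7], 5) == 3
assert binary_search([1, 3, 5, 7], 2) == -1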
nums = []
for i in B:
nums.append(binary_search(A, i))
print " ".join(map(str, nums))
| [
"[email protected]"
] | |
69743c1ebc9817aa39bb9ba21b9445b0b75e7f04 | 2c4df5b105ccf11102d7611523fdc713a2dbad2e | /mmdet2trt/converters/plugins/create_carafefeaturereassemble_plugin.py | 7f800021d2efd7eb535dba98c3c4305c4d7d82d0 | [
"Apache-2.0"
] | permissive | DataXujing/mmdetection-to-tensorrt | 6b809c9039d3f3d6554298bafc3f5c88e4140183 | 7d49d2aa35b966f67a7e21b4c59dc8924c825548 | refs/heads/master | 2023-03-28T17:53:21.496289 | 2021-03-14T16:18:23 | 2021-03-14T16:18:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,592 | py | import numpy as np
import os
import os.path as osp
from .globals import dir_path
import ctypes
ctypes.CDLL(osp.join(dir_path, "libamirstan_plugin.so"))
import tensorrt as trt
def create_carafefeaturereassemble_plugin(layer_name,
scale_factor,
up_kernel,
up_group,
type_id=trt.DataType.FLOAT):
creator = trt.get_plugin_registry().get_plugin_creator(
'CarafeFeatureReassemblePluginDynamic', '1', '')
pfc = trt.PluginFieldCollection()
pf_scale_factor = trt.PluginField("scale_factor",
np.array([scale_factor], dtype=np.int32),
trt.PluginFieldType.INT32)
pfc.append(pf_scale_factor)
pf_up_kernel = trt.PluginField("up_kernel",
np.array([up_kernel], dtype=np.int32),
trt.PluginFieldType.INT32)
pfc.append(pf_up_kernel)
pf_up_group = trt.PluginField("up_group",
np.array([up_group], dtype=np.int32),
trt.PluginFieldType.INT32)
pfc.append(pf_up_group)
pf_type_id = trt.PluginField("type_id", np.array([type_id],
dtype=np.int32),
trt.PluginFieldType.INT32)
pfc.append(pf_type_id)
return creator.create_plugin(layer_name, pfc)
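# --- usage sketch (added for illustration, not part of the original file):
# `network`, `feat` and `mask` are assumed to be an existing
# trt.INetworkDefinition and its input tensors; add_plugin_v2 attaches the
# CARAFE plugin as a network layer.
# plugin = create_carafefeaturereassemble_plugin(
#     'carafe_upsample', scale_factor=2, up_kernel=5, up_group=1)
# carafe_layer = network.add_plugin_v2(inputs=[feat, mask], plugin=plugin)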
| [
"[email protected]"
] | |
caf3403ade329d54b5b29acf0025bdafafa475d9 | f9bfad89ef87a171cfffab2eba9a1ae96886518d | /manipulation/grip/freetabletopplacement.py | 7467caec7fba59e5f0a17754e4a4a56b02c5b620 | [] | no_license | wanweiwei07/pyhiro | 2663f0c9febc9a00da54d3d26e2b00ba758868e0 | 60e24c28a6b39621a235187483d9a13cbbffe987 | refs/heads/master | 2022-02-04T03:31:23.728440 | 2022-01-10T11:26:51 | 2022-01-10T11:26:51 | 60,758,849 | 7 | 4 | null | null | null | null | UTF-8 | Python | false | false | 24,782 | py | #!/usr/bin/python
import os
import MySQLdb as mdb
import numpy as np
from manipulation.grip.robotiq85 import rtq85nm
from panda3d.bullet import BulletWorld
from panda3d.bullet import BulletDebugNode
from panda3d.core import *
from shapely.geometry import LinearRing
from shapely.geometry import Point
from shapely.geometry import Polygon
import pandaplotutils.pandactrl as pandactrl
import pandaplotutils.pandageom as pandageom
import trimesh
from pandaplotutils import pandageom as pg
from utils import collisiondetection as cd
from utils import dbcvt as dc
from utils import robotmath as rm
from database import dbaccess as db
class FreeTabletopPlacement(object):
"""
manipulation.freetabletopplacement doesn't take into account
the position and orientation of the object
it is "free" in position and rotation around z axis
in contrast, each item in regrasp.tabletopplacements
has different position and orientation
it is at a specific pose in the workspace
To clearly indicate the difference, "free" is attached
to the front of "freetabletopplacement"
"s" is attached to the end of "tabletopplacements"
"""
def __init__(self, objpath, handpkg, gdb):
self.objtrimesh=trimesh.load_mesh(objpath)
self.objcom = self.objtrimesh.center_mass
self.objtrimeshconv=self.objtrimesh.convex_hull
# oc means object convex
self.ocfacets, self.ocfacetnormals = self.objtrimeshconv.facets_over(.9999)
# for dbaccess
self.dbobjname = os.path.splitext(os.path.basename(objpath))[0]
# use two bulletworld, one for the ray, the other for the tabletop
self.bulletworldray = BulletWorld()
self.bulletworldhp = BulletWorld()
# plane to remove hand
self.planebullnode = cd.genCollisionPlane(offset=0)
self.bulletworldhp.attachRigidBody(self.planebullnode)
self.handpkg = handpkg
self.handname = handpkg.getHandName()
self.hand = handpkg.newHandNM(hndcolor=[0,1,0,.1])
# self.rtq85hnd = rtq85nm.Rtq85NM(hndcolor=[1, 0, 0, .1])
# for dbsave
# each tpsmat4 corresponds to a set of tpsgripcontacts/tpsgripnormals/tpsgripjawwidth list
self.tpsmat4s = None
self.tpsgripcontacts = None
self.tpsgripnormals = None
self.tpsgripjawwidth = None
# for ocFacetShow
self.counter = 0
self.gdb = gdb
self.loadFreeAirGrip()
def loadFreeAirGrip(self):
"""
load self.freegripid, etc. from mysqldatabase
:param gdb: an object of the database.GraspDB class
:return:
author: weiwei
date: 20170110
"""
freeairgripdata = self.gdb.loadFreeAirGrip(self.dbobjname, handname = self.handname)
if freeairgripdata is None:
raise ValueError("Plan the freeairgrip first!")
self.freegripid = freeairgripdata[0]
self.freegripcontacts = freeairgripdata[1]
self.freegripnormals = freeairgripdata[2]
self.freegriprotmats = freeairgripdata[3]
self.freegripjawwidth = freeairgripdata[4]
def loadFreeTabletopPlacement(self):
"""
load free tabletopplacements
:return:
"""
tpsmat4s = self.gdb.loadFreeTabletopPlacement(self.dbobjname)
if tpsmat4s is not None:
self.tpsmat4s = tpsmat4s
return True
else:
self.tpsmat4s = []
return False
def removebadfacets(self, base, doverh=.1):
"""
remove the facets that cannot support stable placements
:param: doverh: d is the distance of mproj to supportfacet boundary, h is the height of com
when fh>dmg, the object tends to fall over. setting doverh to 0.033 means
when f>0.1mg, the object is judged to be unstable
:return:
author: weiwei
date: 20161213
"""
self.tpsmat4s = []
for i in range(len(self.ocfacets)):
geom = pg.packpandageom(self.objtrimeshconv.vertices,
self.objtrimeshconv.face_normals[self.ocfacets[i]],
self.objtrimeshconv.faces[self.ocfacets[i]])
geombullnode = cd.genCollisionMeshGeom(geom)
self.bulletworldray.attachRigidBody(geombullnode)
pFrom = Point3(self.objcom[0], self.objcom[1], self.objcom[2])
pTo = self.objcom+self.ocfacetnormals[i]*99999
pTo = Point3(pTo[0], pTo[1], pTo[2])
result = self.bulletworldray.rayTestClosest(pFrom, pTo)
self.bulletworldray.removeRigidBody(geombullnode)
if result.hasHit():
hitpos = result.getHitPos()
facetinterpnt = np.array([hitpos[0],hitpos[1],hitpos[2]])
facetnormal = np.array(self.ocfacetnormals[i])
bdverts3d, bdverts2d, facetmat4 = pg.facetboundary(self.objtrimeshconv, self.ocfacets[i],
facetinterpnt, facetnormal)
facetp = Polygon(bdverts2d)
facetinterpnt2d = rm.transformmat4(facetmat4, facetinterpnt)[:2]
apntpnt = Point(facetinterpnt2d[0], facetinterpnt2d[1])
dist2p = apntpnt.distance(facetp.exterior)
dist2c = np.linalg.norm(np.array([hitpos[0],hitpos[1],hitpos[2]])-np.array([pFrom[0],pFrom[1],pFrom[2]]))
if dist2p/dist2c >= doverh:
# hit and stable
self.tpsmat4s.append(pg.cvtMat4np4(facetmat4))
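# --- illustrative note (added, not part of the original file): with the
# default doverh of 0.1, a center of mass sitting 100mm above the support
# facet (dist2c) is kept only if its projection lies at least 10mm inside
# the facet boundary (dist2p), i.e. dist2p/dist2c >= 0.1.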
def gentpsgrip(self, base):
"""
Originally the code of this function is embedded in the removebadfacet function
It is separated on 20170608 to enable common usage of placements for different hands
:return:
author: weiwei
date: 20170608
"""
self.tpsgripcontacts = []
self.tpsgripnormals = []
self.tpsgriprotmats = []
self.tpsgripjawwidth = []
# the id of the grip in freeair
self.tpsgripidfreeair = []
for i in range(len(self.tpsmat4s)):
self.tpsgripcontacts.append([])
self.tpsgripnormals.append([])
self.tpsgriprotmats.append([])
self.tpsgripjawwidth.append([])
self.tpsgripidfreeair.append([])
for j, rotmat in enumerate(self.freegriprotmats):
tpsgriprotmat = rotmat * self.tpsmat4s[i]
# check if the hand collides with the tabletop
# tmprtq85 = self.rtq85hnd
tmphnd = self.hand
# tmprtq85 = rtq85nm.Rtq85NM(hndcolor=[1, 0, 0, 1])
initmat = tmphnd.getMat()
initjawwidth = tmphnd.jawwidth
# open the hand to ensure it doesn't collide with surrounding obstacles
# tmprtq85.setJawwidth(self.freegripjawwidth[j])
tmphnd.setJawwidth(80)
tmphnd.setMat(pandanpmat4 = tpsgriprotmat)
# add hand model to bulletworld
hndbullnode = cd.genCollisionMeshMultiNp(tmphnd.handnp)
result = self.bulletworldhp.contactTest(hndbullnode)
# print result.getNumContacts()
if not result.getNumContacts():
self.tpsgriprotmats[-1].append(tpsgriprotmat)
cct0 = self.tpsmat4s[i].xformPoint(self.freegripcontacts[j][0])
cct1 = self.tpsmat4s[i].xformPoint(self.freegripcontacts[j][1])
self.tpsgripcontacts[-1].append([cct0, cct1])
cctn0 = self.tpsmat4s[i].xformVec(self.freegripnormals[j][0])
cctn1 = self.tpsmat4s[i].xformVec(self.freegripnormals[j][1])
self.tpsgripnormals[-1].append([cctn0, cctn1])
self.tpsgripjawwidth[-1].append(self.freegripjawwidth[j])
self.tpsgripidfreeair[-1].append(self.freegripid[j])
tmphnd.setMat(pandanpmat4 = initmat)
tmphnd.setJawwidth(initjawwidth)
def saveToDB(self):
"""
save freetabletopplacement
manipulation.freetabletopplacement doesn't take into account the position and orientation of the object
it is "free" in position and rotation around z axis
in contrast, each item in regrasp.tabletopplacements has different position and orientation
it is at a specific pose in the workspace
To clearly indicate the difference, "free" is attached to the front of "freetabletopplacement"
"s" is attached to the end of "tabletopplacements"
:param discretesize:
:param gdb:
:return:
author: weiwei
date: 20170111
"""
# save freetabletopplacement
sql = "SELECT * FROM freetabletopplacement,object WHERE freetabletopplacement.idobject = object.idobject \
AND object.name LIKE '%s'" % self.dbobjname
result = self.gdb.execute(sql)
if len(result) == 0:
# the fretabletopplacements for the self.dbobjname is not saved
sql = "INSERT INTO freetabletopplacement(rotmat, idobject) VALUES "
for i in range(len(self.tpsmat4s)):
sql += "('%s', (SELECT idobject FROM object WHERE name LIKE '%s')), " % \
(dc.mat4ToStr(self.tpsmat4s[i]), self.dbobjname)
sql = sql[:-2] + ";"
self.gdb.execute(sql)
else:
print "Freetabletopplacement already exist!"
# save freetabletopgrip
idhand = gdb.loadIdHand(self.handname)
sql = "SELECT * FROM freetabletopgrip,freetabletopplacement,freeairgrip,object WHERE \
freetabletopgrip.idfreetabletopplacement = freetabletopplacement.idfreetabletopplacement AND \
freetabletopgrip.idfreeairgrip = freeairgrip.idfreeairgrip AND \
freetabletopplacement.idobject = object.idobject AND \
object.name LIKE '%s' AND freeairgrip.idhand = %d" % (self.dbobjname, idhand)
result = self.gdb.execute(sql)
if len(result) == 0:
for i in range(len(self.tpsmat4s)):
sql = "SELECT freetabletopplacement.idfreetabletopplacement FROM freetabletopplacement,object WHERE \
freetabletopplacement.rotmat LIKE '%s' AND \
object.name LIKE '%s'" % (dc.mat4ToStr(self.tpsmat4s[i]), self.dbobjname)
result = self.gdb.execute(sql)[0]
print result
if len(result) != 0:
idfreetabletopplacement = result[0]
# note self.tpsgriprotmats[i] might be empty (no cd-free grasps)
if len(self.tpsgriprotmats[i]) != 0:
sql = "INSERT INTO freetabletopgrip(contactpoint0, contactpoint1, contactnormal0, contactnormal1, \
rotmat, jawwidth, idfreetabletopplacement, idfreeairgrip) VALUES "
for j in range(len(self.tpsgriprotmats[i])):
cct0 = self.tpsgripcontacts[i][j][0]
cct1 = self.tpsgripcontacts[i][j][1]
cctn0 = self.tpsgripnormals[i][j][0]
cctn1 = self.tpsgripnormals[i][j][1]
sql += "('%s', '%s', '%s', '%s', '%s', '%s', %d, %d), " % \
(dc.v3ToStr(cct0), dc.v3ToStr(cct1), dc.v3ToStr(cctn0), dc.v3ToStr(cctn1), \
dc.mat4ToStr(self.tpsgriprotmats[i][j]), str(self.tpsgripjawwidth[i][j]), \
idfreetabletopplacement, self.tpsgripidfreeair[i][j])
sql = sql[:-2] + ";"
self.gdb.execute(sql)
else:
print "Freetabletopgrip already exist!"
def removebadfacetsshow(self, base, doverh=.1):
"""
remove the facets that cannot support stable placements
:param: doverh: d is the distance of mproj to supportfacet boundary, h is the height of com
when fh>dmg, the object tends to fall over. setting doverh to 0.033 means
when f>0.1mg, the object is judged to be unstable
:return:
author: weiwei
date: 20161213
"""
plotoffsetfp = 10
# print self.counter
if self.counter < len(self.ocfacets):
i = self.counter
# for i in range(len(self.ocfacets)):
geom = pg.packpandageom(self.objtrimeshconv.vertices,
self.objtrimeshconv.face_normals[self.ocfacets[i]],
self.objtrimeshconv.faces[self.ocfacets[i]])
geombullnode = cd.genCollisionMeshGeom(geom)
self.bulletworldray.attachRigidBody(geombullnode)
pFrom = Point3(self.objcom[0], self.objcom[1], self.objcom[2])
pTo = self.objcom+self.ocfacetnormals[i]*99999
pTo = Point3(pTo[0], pTo[1], pTo[2])
result = self.bulletworldray.rayTestClosest(pFrom, pTo)
self.bulletworldray.removeRigidBody(geombullnode)
if result.hasHit():
hitpos = result.getHitPos()
pg.plotArrow(base.render, spos=self.objcom,
epos = self.objcom+self.ocfacetnormals[i], length=100)
facetinterpnt = np.array([hitpos[0],hitpos[1],hitpos[2]])
facetnormal = np.array(self.ocfacetnormals[i])
bdverts3d, bdverts2d, facetmat4 = pg.facetboundary(self.objtrimeshconv, self.ocfacets[i],
facetinterpnt, facetnormal)
for j in range(len(bdverts3d)-1):
spos = bdverts3d[j]
epos = bdverts3d[j+1]
pg.plotStick(base.render, spos, epos, thickness = 1, rgba=[.5,.5,.5,1])
facetp = Polygon(bdverts2d)
facetinterpnt2d = rm.transformmat4(facetmat4, facetinterpnt)[:2]
apntpnt = Point(facetinterpnt2d[0], facetinterpnt2d[1])
dist2p = apntpnt.distance(facetp.exterior)
dist2c = np.linalg.norm(np.array([hitpos[0],hitpos[1],hitpos[2]])-np.array([pFrom[0],pFrom[1],pFrom[2]]))
if dist2p/dist2c < doverh:
print "not stable"
# return
else:
print dist2p/dist2c
pol_ext = LinearRing(bdverts2d)
d = pol_ext.project(apntpnt)
p = pol_ext.interpolate(d)
closest_point_coords = list(p.coords)[0]
closep = np.array([closest_point_coords[0], closest_point_coords[1], 0])
closep3d = rm.transformmat4(rm.homoinverse(facetmat4), closep)[:3]
pg.plotDumbbell(base.render, spos=facetinterpnt, epos=closep3d, thickness=1.5, rgba=[0,0,1,1])
for j in range(len(bdverts3d)-1):
spos = bdverts3d[j]
epos = bdverts3d[j+1]
pg.plotStick(base.render, spos, epos, thickness = 1.5, rgba=[0,1,0,1])
# geomoff = pg.packpandageom(self.objtrimeshconv.vertices +
# np.tile(plotoffsetfp * self.ocfacetnormals[i],
# [self.objtrimeshconv.vertices.shape[0], 1]),
# self.objtrimeshconv.face_normals[self.ocfacets[i]],
# self.objtrimeshconv.faces[self.ocfacets[i]])
#
# nodeoff = GeomNode('supportfacet')
# nodeoff.addGeom(geomoff)
# staroff = NodePath('supportfacet')
# staroff.attachNewNode(nodeoff)
# staroff.setColor(Vec4(1,0,1,1))
# staroff.setTransparency(TransparencyAttrib.MAlpha)
# staroff.setTwoSided(True)
# staroff.reparentTo(base.render)
self.counter+=1
else:
self.counter=0
def grpshow(self, base):
sql = "SELECT freetabletopplacement.idfreetabletopplacement, freetabletopplacement.rotmat \
FROM freetabletopplacement,object WHERE \
freetabletopplacement.idobject = object.idobject AND object.name LIKE '%s'" % self.dbobjname
result = self.gdb.execute(sql)
if len(result) != 0:
idfreetabletopplacement = int(result[3][0])
objrotmat = dc.strToMat4(result[3][1])
# show object
geom = pg.packpandageom(self.objtrimesh.vertices,
self.objtrimesh.face_normals,
self.objtrimesh.faces)
node = GeomNode('obj')
node.addGeom(geom)
star = NodePath('obj')
star.attachNewNode(node)
star.setColor(Vec4(.77,0.67,0,1))
star.setTransparency(TransparencyAttrib.MAlpha)
star.setMat(objrotmat)
star.reparentTo(base.render)
sql = "SELECT freetabletopgrip.rotmat, freetabletopgrip.jawwidth FROM freetabletopgrip WHERE \
freetabletopgrip.idfreetabletopplacement=%d" % idfreetabletopplacement
result = self.gdb.execute(sql)
for resultrow in result:
hndrotmat = dc.strToMat4(resultrow[0])
hndjawwidth = float(resultrow[1])
# show grasps
tmprtq85 = rtq85nm.Rtq85NM(hndcolor=[0, 1, 0, .1])
tmprtq85.setMat(pandanpmat4 = hndrotmat)
tmprtq85.setJawwidth(hndjawwidth)
# tmprtq85.setJawwidth(80)
tmprtq85.reparentTo(base.render)
def showOnePlacementAndAssociatedGrips(self, base):
"""
show one placement and its associated grasps
:param base:
:return:
"""
for i in range(len(self.tpsmat4s)):
if i == 0:
objrotmat = self.tpsmat4s[i]
# objrotmat.setRow(0, -objrotmat.getRow3(0))
rotzmat = Mat4.rotateMat(0, Vec3(0,0,1))
objrotmat = objrotmat*rotzmat
# show object
geom = pg.packpandageom(self.objtrimesh.vertices,
self.objtrimesh.face_normals,
self.objtrimesh.faces)
node = GeomNode('obj')
node.addGeom(geom)
star = NodePath('obj')
star.attachNewNode(node)
star.setColor(Vec4(.7,0.3,0,1))
star.setTransparency(TransparencyAttrib.MAlpha)
star.setMat(objrotmat)
star.reparentTo(base.render)
for j in range(len(self.tpsgriprotmats[i])):
# for j in range(13,14):
hndrotmat = self.tpsgriprotmats[i][j]
hndjawwidth = self.tpsgripjawwidth[i][j]
# show grasps
tmphnd = self.handpkg.newHandNM(hndcolor=[0, 1, 0, .5])
tmphnd.setMat(pandanpmat4 = hndrotmat)
tmphnd.setJawwidth(hndjawwidth)
# tmprtq85.setJawwidth(80)
tmphnd.reparentTo(base.render)
def ocfacetshow(self, base):
print self.objcom
npf = base.render.find("**/supportfacet")
if npf:
npf.removeNode()
plotoffsetfp = 10
print self.counter
print len(self.ocfacets)
if self.counter < len(self.ocfacets):
geom = pandageom.packpandageom(self.objtrimeshconv.vertices+
np.tile(plotoffsetfp*self.ocfacetnormals[self.counter],
[self.objtrimeshconv.vertices.shape[0],1]),
self.objtrimeshconv.face_normals[self.ocfacets[self.counter]],
self.objtrimeshconv.faces[self.ocfacets[self.counter]])
# geom = pandageom.packpandageom(self.objtrimeshconv.vertices,
# self.objtrimeshconv.face_normals,
# self.objtrimeshconv.faces)
node = GeomNode('supportfacet')
node.addGeom(geom)
star = NodePath('supportfacet')
star.attachNewNode(node)
star.setColor(Vec4(1,0,1,1))
star.setTransparency(TransparencyAttrib.MAlpha)
star.setTwoSided(True)
star.reparentTo(base.render)
self.counter+=1
else:
self.counter = 0
if __name__ == '__main__':
base = pandactrl.World(camp=[700,300,700], lookatp=[0,0,0])
this_dir, this_filename = os.path.split(__file__)
# objpath = os.path.join(this_dir, "objects", "sandpart.stl")
# objpath = os.path.join(this_dir, "objects", "ttube.stl")
# objpath = os.path.join(this_dir, "objects", "tool.stl")
objpath = os.path.join(this_dir, "objects", "tool2.stl")
# objpath = os.path.join(this_dir, "objects", "planewheel.stl")
# objpath = os.path.join(this_dir, "objects", "planelowerbody.stl")
# objpath = os.path.join(this_dir, "objects", "planefrontstay.stl")
# objpath = os.path.join(this_dir, "objects", "planerearstay.stl")
# objpath = os.path.join(this_dir, "objects", "planerearstay2.stl")
print objpath
from manipulation.grip.hrp5three import hrp5threenm
handpkg = hrp5threenm
# handpkg = rtq85nm
gdb = db.GraspDB()
tps = FreeTabletopPlacement(objpath, handpkg, gdb)
# objpath0 = os.path.join(this_dir, "objects", "ttube.stl")
# objpath1 = os.path.join(this_dir, "objects", "tool.stl")
# objpath2 = os.path.join(this_dir, "objects", "planewheel.stl")
# objpath3 = os.path.join(this_dir, "objects", "planelowerbody.stl")
# objpath4 = os.path.join(this_dir, "objects", "planefrontstay.stl")
# objpath5 = os.path.join(this_dir, "objects", "planerearstay.stl")
# objpaths = [objpath0, objpath1, objpath2, objpath3, objpath4, objpath5]
# import time
# fo = open("foo.txt", "w")
# for objpath in objpaths:
# tic = time.clock()
# tps = FreeTabletopPlacement(objpath, gdb)
# tps.removebadfacets(base, doverh=.2)
# toc = time.clock()
# print toc-tic
# fo.write(os.path.basename(objpath)+' '+str(toc-tic)+'\n')
# fo.close()
# # plot obj and its convexhull
# geom = pandageom.packpandageom(tps.objtrimesh.vertices,
# tps.objtrimesh.face_normals,
# tps.objtrimesh.faces)
# node = GeomNode('obj')
# node.addGeom(geom)
# star = NodePath('obj')
# star.attachNewNode(node)
# star.setColor(Vec4(1,1,0,1))
# star.setTransparency(TransparencyAttrib.MAlpha)
# star.reparentTo(base.render)
# geom = pandageom.packpandageom(tps.objtrimeshconv.vertices,
# tps.objtrimeshconv.face_normals,
# tps.objtrimeshconv.faces)
# node = GeomNode('objconv')
# node.addGeom(geom)
# star = NodePath('objconv')
# star.attachNewNode(node)
# star.setColor(Vec4(0, 1, 0, .3))
# star.setTransparency(TransparencyAttrib.MAlpha)
# star.reparentTo(base.render)
# pg.plotSphere(base.render, pos=tps.objcom, radius=10, rgba=[1,0,0,1])
# def updateshow(task):
# # tps.ocfacetshow(base)
# tps.removebadfacetsshow(base, doverh=.1)
# return task.again
# taskMgr.doMethodLater(.1, updateshow, "tickTask")
# def updateworld(world, task):
# world.doPhysics(globalClock.getDt())
# return task.cont
#
if tps.loadFreeTabletopPlacement():
pass
else:
tps.removebadfacets(base, doverh=.15)
tps.gentpsgrip(base)
tps.saveToDB()
#
# bullcldrnp = base.render.attachNewNode("bulletcollider")
# debugNode = BulletDebugNode('Debug')
# debugNode.showWireframe(True)
# debugNP = bullcldrnp.attachNewNode(debugNode)
# debugNP.show()
#
# tps.bulletworldhp.setDebugNode(debugNP.node())
#
# taskMgr.add(updateworld, "updateworld", extraArgs=[tps.bulletworldhp], appendTask=True)
# tps.grpshow(base)
# tps.showOnePlacementAndAssociatedGrips(base)
base.run() | [
"[email protected]"
] | |
b021d9ec2c2516c3b8a4d02a66baf7ed888c2433 | 5f7225e40f34668c93935ef1e003629f28856030 | /mlu/metrics/text/cider/cider_coco.py | 60bc7517bd2022263a948f1391a7f0f7e32ad79f | [
"MIT"
] | permissive | Labbeti/MLU | 33dd639e134f9eeca6609c1533c7936b8f823111 | 91aa907a3f820e53902578c3d0110fe9a01c88e7 | refs/heads/master | 2023-05-23T14:50:23.438152 | 2021-06-14T10:55:24 | 2021-06-14T10:55:24 | 320,558,953 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,466 | py | """
Microsoft COCO caption metric 'CIDER'.
Code imported from : https://github.com/peteanderson80/coco-caption/blob/master/pycocoevalcap/cider/cider_scorer.py
and https://github.com/peteanderson80/coco-caption/blob/master/pycocoevalcap/cider/cider.py
Authors : Ramakrishna Vedantam <[email protected]> and Tsung-Yi Lin <[email protected]>
Modified : Yes (typing_, imports)
"""
# Filename: cider.py
#
# Description: Describes the class to compute the CIDEr (Consensus-Based Image Description Evaluation) Metric
# by Vedantam, Zitnick, and Parikh (http://arxiv.org/abs/1411.5726)
#
# Creation Date: Sun Feb 8 14:16:54 2015
#
# Authors: Ramakrishna Vedantam <[email protected]> and Tsung-Yi Lin <[email protected]>
# !/usr/bin/env python
# Tsung-Yi Lin <[email protected]>
# Ramakrishna Vedantam <[email protected]>
import copy
import math
import numpy as np
from collections import defaultdict
from typing import List
def precook(s: str, n: int = 4):
"""
Takes a string as input and returns an object that can be given to
either cook_refs or cook_test. This is optional: cook_refs and cook_test
can take string arguments as well.
:param s: string : sentence to be converted into ngrams
:param n: int : number of ngrams for which representation is calculated
:return: term frequency vector for occuring ngrams
"""
words = s.split()
counts = defaultdict(int)
for k in range(1, n + 1):
for i in range(len(words) - k + 1):
ngram = tuple(words[i:i + k])
counts[ngram] += 1
return counts
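# --- usage sketch (added for illustration): n-grams are stored as tuples of
# words mapped to their counts, e.g.
# precook('the cat the cat', 2) ->
#   {('the',): 2, ('cat',): 2, ('the', 'cat'): 2, ('cat', 'the'): 1}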
def cook_refs(refs: List[str], n: int = 4): # lhuang: oracle will call with 'average'
"""Takes a list of reference sentences for a single segment
and returns an object that encapsulates everything that BLEU
needs to know about them.
:param refs: list of string : reference sentences for some image
:param n: int : number of ngrams for which (ngram) representation is calculated
:return: result (list of dict)
"""
return [precook(ref, n) for ref in refs]
def cook_test(test: str, n: int = 4):
"""Takes a test sentence and returns an object that
encapsulates everything that BLEU needs to know about it.
:param test: list of string : hypothesis sentence for some image
:param n: int : number of ngrams for which (ngram) representation is calculated
:return: result (dict)
"""
return precook(test, n)
class CiderScorer(object):
"""CIDEr scorer.
"""
def copy(self):
""" copy the refs."""
new = CiderScorer(n=self.n)
new.ctest = copy.copy(self.ctest)
new.crefs = copy.copy(self.crefs)
return new
def __init__(self, test: str = None, refs: List[str] = None, n=4, sigma=6.0):
""" singular instance """
self.n = n
self.sigma = sigma
self.crefs = []
self.ctest = []
self.document_frequency = defaultdict(float)
self.cook_append(test, refs)
self.ref_len = None
def cook_append(self, test: str, refs: List[str]):
"""called by constructor and __iadd__ to avoid creating new instances."""
if refs is not None:
self.crefs.append(cook_refs(refs))
if test is not None:
self.ctest.append(cook_test(test)) # N.B.: -1
else:
self.ctest.append(None) # lens of crefs and ctest have to match
def size(self):
assert len(self.crefs) == len(self.ctest), 'refs/test mismatch! %d<>%d' % (len(self.crefs), len(self.ctest))
return len(self.crefs)
def __iadd__(self, other):
"""add an instance (e.g., from another sentence)."""
if type(other) is tuple:
# avoid creating new CiderScorer instances
self.cook_append(other[0], other[1])
else:
self.ctest.extend(other.ctest)
self.crefs.extend(other.crefs)
return self
def compute_doc_freq(self):
"""
Compute term frequency for reference data.
This will be used to compute idf (inverse document frequency later)
The term frequency is stored in the object
:return: None
"""
for refs in self.crefs:
# refs, k ref captions of one image
for ngram in set([ngram for ref in refs for ngram, count in ref.items()]):
self.document_frequency[ngram] += 1
# maxcounts[ngram] = max(maxcounts.get(ngram,0), count)
# compute log reference length
self.ref_len = np.log(float(len(self.crefs)))
def compute_cider(self):
def counts2vec(cnts):
"""
Function maps counts of ngram to vector of tfidf weights.
The function returns vec, an array of dictionary that store mapping of n-gram and tf-idf weights.
The n-th entry of array denotes length of n-grams.
:param cnts:
:return: vec (array of dict), norm (array of float), length (int)
"""
vec = [defaultdict(float) for _ in range(self.n)]
length = 0
norm = [0.0 for _ in range(self.n)]
for (ngram, term_freq) in cnts.items():
# give word count 1 if it doesn't appear in reference corpus
df = np.log(max(1.0, self.document_frequency[ngram]))
# ngram index
n = len(ngram) - 1
# tf (term_freq) * idf (precomputed idf) for n-grams
vec[n][ngram] = float(term_freq) * (self.ref_len - df)
# compute norm for the vector. the norm will be used for computing similarity
norm[n] += pow(vec[n][ngram], 2)
if n == 1:
length += term_freq
norm = [np.sqrt(n) for n in norm]
return vec, norm, length
def sim(vec_hyp, vec_ref, norm_hyp, norm_ref, length_hyp, length_ref):
"""
Compute the cosine similarity of two vectors.
:param vec_hyp: array of dictionary for vector corresponding to hypothesis
:param vec_ref: array of dictionary for vector corresponding to reference
:param norm_hyp: array of float for vector corresponding to hypothesis
:param norm_ref: array of float for vector corresponding to reference
:param length_hyp: int containing length of hypothesis
:param length_ref: int containing length of reference
:return: array of score for each n-grams cosine similarity
"""
delta = float(length_hyp - length_ref)
# measure cosine similarity
val = np.array([0.0 for _ in range(self.n)])
for n in range(self.n):
# ngram
for (ngram, count) in vec_hyp[n].items():
# vrama91 : added clipping
val[n] += min(vec_hyp[n][ngram], vec_ref[n][ngram]) * vec_ref[n][ngram]
if (norm_hyp[n] != 0) and (norm_ref[n] != 0):
val[n] /= (norm_hyp[n] * norm_ref[n])
assert (not math.isnan(val[n]))
# vrama91: added a length based gaussian penalty
val[n] *= np.e ** (-(delta ** 2) / (2 * self.sigma ** 2))
return val
scores = []
for test, refs in zip(self.ctest, self.crefs):
# compute vector for test captions
vec, norm, length = counts2vec(test)
# compute vector for ref captions
score = np.array([0.0 for _ in range(self.n)])
for ref in refs:
vec_ref, norm_ref, length_ref = counts2vec(ref)
score += sim(vec, vec_ref, norm, norm_ref, length, length_ref)
# change by vrama91 - mean of ngram scores, instead of sum
score_avg = np.mean(score)
# divide by number of references
score_avg /= len(refs)
# multiply score by 10
score_avg *= 10.0
# append score of an image to the score list
scores.append(score_avg)
return scores
def compute_score(self, option=None, verbose=0):
# compute idf
self.compute_doc_freq()
# assert to check document frequency
assert (len(self.ctest) >= max(self.document_frequency.values()))
# compute cider score
score = self.compute_cider()
# debug
# print score
return np.mean(np.array(score)), np.array(score)
class Cider:
"""
Main Class to compute the CIDEr metric
"""
def __init__(self, test=None, refs=None, n=4, sigma=6.0):
# set cider to sum over 1 to 4-grams
self._n = n
# set the standard deviation parameter for gaussian penalty
self._sigma = sigma
def compute_score(self, gts, res):
"""
Main function to compute CIDEr score
:param gts (dict) : dictionary with key <image> and value <list of tokenized reference sentences>
res (dict) : dictionary with key <image> and value <single-element list with the tokenized hypothesis>
:return: cider (float) : computed CIDEr score for the corpus
"""
assert (sorted(gts.keys()) == sorted(res.keys()))
imgIds = sorted(gts.keys())
cider_scorer = CiderScorer(n=self._n, sigma=self._sigma)
for id in imgIds:
hypo = res[id]
ref = gts[id]
# Sanity check.
assert (type(hypo) is list)
assert (len(hypo) == 1)
assert (type(ref) is list)
assert (len(ref) >= 1)
cider_scorer += (hypo[0], ref)
(score, scores) = cider_scorer.compute_score()
return score, scores
def method(self):
return 'CIDEr'
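# --- usage sketch (added for illustration, not part of the original file):
# keys pair one single-element hypothesis list with its reference list.
# Note that idf is estimated from the corpus, so a one-image corpus like
# this gives a degenerate score of 0.
if __name__ == '__main__':
    gts = {'img1': ['a cat sits on a mat', 'a cat is on the mat']}
    res = {'img1': ['a cat on a mat']}
    score, scores = Cider().compute_score(gts, res)
    print(score, scores)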
| [
"[email protected]"
] | |
7f5464dccd50c6e491579e18aadd00d40c17fa86 | 25872e1ba4f86cbbf77d0130f341b21e5dd9e692 | /LetterCombinationsOfAPhoneNumber.py | a161dcb8f22d6177e26dc50a95632fe67acae452 | [] | no_license | zongxinwu92/leetcode | dc3d209e14532b9b01cfce6d4cf6a4c2d7ced7de | e1aa45a1ee4edaf72447b771ada835ad73e7f508 | refs/heads/master | 2021-06-10T21:46:23.937268 | 2017-01-09T09:58:49 | 2017-01-09T09:58:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | '''
Created on 1.12.2017
@author: Jesse

Given a digit string, return all possible letter combinations that the number could represent.
A mapping of digit to letters (just like on the telephone buttons) is given below.
Input: Digit string "23"
Output: ["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"].
Note:
Although the above answer is in lexicographical order, your answer could be in any order you want.
'''
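# --- illustrative sketch (added, not part of the original file): one
# standard iterative solution to the problem described above.
def letter_combinations(digits):
    if not digits:
        return []
    mapping = {'2': 'abc', '3': 'def', '4': 'ghi', '5': 'jkl',
               '6': 'mno', '7': 'pqrs', '8': 'tuv', '9': 'wxyz'}
    combos = ['']
    for d in digits:
        # extend every prefix built so far by each letter of the next digit
        combos = [prefix + ch for prefix in combos for ch in mapping[d]]
    return combos

print(letter_combinations('23'))  # ['ad', 'ae', 'af', 'bd', 'be', 'bf', 'cd', 'ce', 'cf']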
| [
"[email protected]"
] | |
f217d34a908c4e9257764aa68d23ca1dda940bfe | 06e62355e97daacd95d513572f1f7dbd5f73411e | /opt/notebooks/cubic_reg.py | ee2533643f4f56db8d325d72f75fac71bae4f78c | [] | no_license | tt6746690/misc_impl | 3a890abab44a79a9e7fdaa1190e8c1bb43b9f7b2 | 0a6653a66f1fb2590df9d6697e4cd69d32a2baaa | refs/heads/master | 2022-11-05T12:51:28.143805 | 2021-11-15T22:07:44 | 2021-11-15T22:07:44 | 217,972,549 | 0 | 1 | null | 2022-10-10T06:42:16 | 2019-10-28T05:30:45 | Jupyter Notebook | UTF-8 | Python | false | false | 21,042 | py | """
This module implements cubic regularization of Newton's method, as described in Nesterov and Polyak (2006) and also
the adaptive cubic regularization algorithm described in Cartis et al. (2011). This code solves the cubic subproblem
according to slight modifications of Algorithm 7.3.6 of Conn et. al (2000). Cubic regularization solves unconstrained
minimization problems by minimizing a cubic upper bound to the function at each iteration.
Implementation by Corinne Jones
[email protected]
June 2016
References:
- Nesterov, Y., & Polyak, B. T. (2006). Cubic regularization of Newton method and its global performance.
Mathematical Programming, 108(1), 177-205.
- Cartis, C., Gould, N. I., & Toint, P. L. (2011). Adaptive cubic regularisation methods for unconstrained optimization.
Part I: motivation, convergence and numerical results. Mathematical Programming, 127(2), 245-295.
- Conn, A. R., Gould, N. I., & Toint, P. L. (2000). Trust region methods (Vol. 1). Siam.
- Gould, N. I., Lucidi, S., Roma, M., & Toint, P. L. (1999). Solving the trust-region subproblem using the Lanczos
method. SIAM Journal on Optimization, 9(2), 504-525.
"""
from __future__ import division
import numpy as np
import scipy.linalg
class Algorithm:
def __init__(self, x0, f=None, gradient=None, hessian=None, L=None, L0=None, kappa_easy=0.0001, maxiter=10000, submaxiter=100000, conv_tol=1e-5, conv_criterion='gradient', epsilon=2*np.sqrt(np.finfo(float).eps)):
"""
Collect all the inputs to the cubic regularization algorithm.
Required inputs: function or all of gradient and Hessian and L. If you choose conv_criterion='Nesterov', you must also supply L.
:param x0: Starting point for cubic regularization algorithm
:param f: Function to be minimized
:param gradient: Gradient of f (input as a function that returns a numpy array)
:param hessian: Hessian of f (input as a function that returns a numpy array)
:param L: Lipschitz constant on the Hessian
:param L0: Starting point for line search for M
:param kappa_easy: Convergence tolerance for the cubic subproblem
:param maxiter: Maximum number of cubic regularization iterations
:param submaxiter: Maximum number of iterations for the cubic subproblem
:param conv_tol: Convergence tolerance
:param conv_criterion: Criterion for convergence: 'gradient' or 'nesterov'. Gradient uses norm of gradient.
Nesterov's uses max(sqrt(2/(L+M)norm(f'(x)), -2/(2L+M)lambda_min(f''(x))).
:param epsilon: Value added/subtracted from x when approximating gradients and Hessians
"""
self.f = f
self.gradient = gradient
self.hessian = hessian
self.x0 = np.array(x0)*1.0
self.maxiter = maxiter
self.submaxiter = submaxiter
self.conv_tol = conv_tol
self.conv_criterion = conv_criterion.lower()
self.epsilon = epsilon
self.L = L
self.L0 = L0
self.kappa_easy = kappa_easy
self.n = len(x0)
self._check_inputs()
# Estimate the gradient, hessian, and find a lower bound L0 for L if necessary
if gradient is None:
self.gradient = self.approx_grad
if hessian is None:
self.hessian = self.approx_hess
if L0 is None and L is None:
self.L0 = np.linalg.norm(self.hessian(self.x0)-self.hessian(self.x0+np.ones_like(self.x0)), ord=2)/np.linalg.norm(np.ones_like(self.x0))+self.epsilon
self.grad_x = self.gradient(self.x0)
self.hess_x = self.hessian(self.x0)
self.lambda_nplus = self._compute_lambda_nplus()[0]
def _check_inputs(self):
"""
Ensure that the inputs are of the right form and all necessary inputs have been supplied
"""
if not isinstance(self.x0, (tuple, list, np.ndarray)):
raise TypeError('Invalid input type for x0')
if len(self.x0) < 1:
raise ValueError('x0 must have length > 0')
if not (self.f is not None or (self.gradient is not None and self.hessian is not None and self.L is not None)):
raise AttributeError('You must specify f and/or each of the following: gradient, hessian, and L')
if not((not self.L or self.L > 0)and (not self.L0 or self.L0 > 0) and self.kappa_easy > 0 and self.maxiter > 0 and self.conv_tol > 0 and self.epsilon > 0):
raise ValueError('All inputs that are constants must be larger than 0')
if self.f is not None:
try:
self.f(self.x0)
except TypeError:
raise TypeError('x0 is not a valid input to function f')
if self.gradient is not None:
try:
self.gradient(self.x0)
except TypeError:
raise TypeError('x0 is not a valid input to the gradient. Is the gradient a function with input dimension length(x0)?')
if self.hessian is not None:
try:
self.hessian(self.x0)
except TypeError:
raise TypeError('x0 is not a valid input to the hessian. Is the hessian a function with input dimension length(x0)?')
if not (self.conv_criterion == 'gradient' or self.conv_criterion == 'nesterov'):
raise ValueError('Invalid input for convergence criterion')
if self.conv_criterion == 'nesterov' and self.L is None:
raise ValueError("With Nesterov's convergence criterion you must specify L")
@staticmethod
def _std_basis(size, idx):
"""
Compute the idx'th standard basis vector
:param size: Length of the vector
:param idx: Index of value 1 in the vector
:return: ei: Standard basis vector with 1 in the idx'th position
"""
ei = np.zeros(size)
ei[idx] = 1
return ei
def approx_grad(self, x):
"""
Approximate the gradient of the function self.f at x
:param x: Point at which the gradient will be approximated
:return: Estimated gradient at x
"""
return np.asarray([(self.f(x + self.epsilon * self._std_basis(self.n, i)) - self.f(x - self.epsilon * self._std_basis(self.n, i))) / (2 * self.epsilon) for i in range(0, self.n)])
def approx_hess(self, x):
"""
Approximate the hessian of the function self.f at x
:param x: Point at which the Hessian will be approximated
:return: Estimated Hessian at x
"""
grad_x0 = self.gradient(x)
hessian = np.zeros((self.n, self.n))
for j in range(0, self.n):
grad_x_plus_eps = self.gradient(x + self.epsilon * self._std_basis(self.n, j))
for i in range(0, self.n):
hessian[i,j] = (grad_x_plus_eps[i]-grad_x0[i])/self.epsilon
return hessian
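# --- illustrative note (added, not part of the original file): column j of
# the approximation above is (grad(x + eps*e_j) - grad(x)) / eps, a forward
# difference, so one Hessian estimate costs n+1 gradient evaluations.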
def _compute_lambda_nplus(self):
"""
Compute max(-1*smallest eigenvalue of hessian of f at x, 0)
:return: max(-1*smallest eigenvalue of hessian of f at x, 0)
:return: lambda_n: Smallest eigenvalue of hessian of f at x
"""
lambda_n = scipy.linalg.eigh(self.hess_x, eigvals_only=True, eigvals=(0, 0))
return max(-lambda_n[0], 0), lambda_n
def _check_convergence(self, lambda_min, M):
"""
Check whether the cubic regularization algorithm has converged
:param lambda_min: Minimum eigenvalue at current point
:param M: Current value used for M in cubic upper approximation to f at x_new
:return: True/False depending on whether the convergence criterion has been satisfied
"""
if self.conv_criterion == 'gradient':
if np.linalg.norm(self.grad_x) <= self.conv_tol:
return True
else:
return False
elif self.conv_criterion == 'nesterov':
if max(np.sqrt(2/(self.L+M)*np.linalg.norm(self.grad_x)), -2/(2*self.L+M)*lambda_min) <= self.conv_tol:
return True
else:
return False
class CubicRegularization(Algorithm):
def __init__(self, x0, f=None, gradient=None, hessian=None, L=None, L0=None, kappa_easy=0.0001, maxiter=10000, submaxiter=10000, conv_tol=1e-5, conv_criterion='gradient', epsilon=2*np.sqrt(np.finfo(float).eps)):
Algorithm.__init__(self, x0, f=f, gradient=gradient, hessian=hessian, L=L, L0=L0, kappa_easy=kappa_easy, maxiter=maxiter, submaxiter=submaxiter, conv_tol=conv_tol, conv_criterion=conv_criterion, epsilon=epsilon)
def cubic_reg(self):
"""
Run the cubic regularization algorithm
:return: x_new: Final point
:return: intermediate_points: All points visited by the cubic regularization algorithm on the way to x_new
:return: iter: Number of iterations of cubic regularization
"""
iter = flag = 0
converged = False
x_new = self.x0
mk = self.L0
intermediate_points = [x_new]
while iter < self.maxiter and converged is False:
x_old = x_new
x_new, mk, flag = self._find_x_new(x_old, mk)
self.grad_x = self.gradient(x_new)
self.hess_x = self.hessian(x_new)
self.lambda_nplus, lambda_min = self._compute_lambda_nplus()
converged = self._check_convergence(lambda_min, mk)
if flag != 0:
print('Convergence criteria not met, likely due to round-off error or ill-conditioned Hessian.')
return x_new, intermediate_points, iter, flag
intermediate_points.append(x_new)
iter += 1
return x_new, intermediate_points, iter, flag
def _find_x_new(self, x_old, mk):
"""
Determine what M_k should be and compute the next point for the cubic regularization algorithm
:param x_old: Previous point
:param mk: Previous value of M_k (will start with this if L isn't specified)
:return: x_new: New point
:return: mk: New value of M_k
"""
if self.L is not None:
aux_problem = _AuxiliaryProblem(x_old, self.grad_x, self.hess_x, self.L, self.lambda_nplus, self.kappa_easy, self.submaxiter)
s, flag = aux_problem.solve()
x_new = s+x_old
return x_new, self.L, flag
else:
decreased = False
iter = 0
f_xold = self.f(x_old)
while not decreased and iter < self.submaxiter:
mk *= 2
aux_problem = _AuxiliaryProblem(x_old, self.grad_x, self.hess_x, mk, self.lambda_nplus, self.kappa_easy, self.submaxiter)
s, flag = aux_problem.solve()
x_new = s+x_old
decreased = (self.f(x_new)-f_xold <= 0)
iter += 1
if iter == self.submaxiter:
raise RuntimeError('Could not find cubic upper approximation')
mk = max(0.5 * mk, self.L0)
return x_new, mk, flag
class AdaptiveCubicReg(Algorithm):
def __init__(self, x0, f, gradient=None, hessian=None, L=None, L0=None, sigma0=1, eta1=0.1, eta2=0.9, kappa_easy=0.0001, maxiter=10000, submaxiter=10000, conv_tol=1e-5, hessian_update_method='exact', conv_criterion='gradient', epsilon=2*np.sqrt(np.finfo(float).eps)):
Algorithm.__init__(self, x0, f=f, gradient=gradient, hessian=hessian, L=sigma0/2, L0=L0, kappa_easy=kappa_easy, maxiter=maxiter, submaxiter=submaxiter, conv_tol=conv_tol, conv_criterion=conv_criterion, epsilon=epsilon)
self.sigma = sigma0
self.eta1 = eta1
self.eta2 = eta2
self.intermediate_points = [self.x0]
self.iter = 0
self.hessian_update_method= hessian_update_method.lower()
def _update_hess(self, x_new, grad_x_old, s, method='exact'):
"""
Compute the (approximation) of the Hessian at the next point
:param x_new: Next point
:param grad_x_old: Gradient at old point
:param s: Step from old point to new point
:param method: Method to be used to update the Hessian. Choice: 'exact', 'broyden' (Powell-symmetric Broyden
update), or 'rank_one' (Rank one symmetric update)
"""
if method == 'exact':
self.hess_x = self.hessian(x_new)
else:
y = self.grad_x - grad_x_old
r = y - self.hess_x.dot(s)
if method == 'broyden':
self.hess_x += (np.outer(r, s)+np.outer(s, r))/np.dot(s, s)-np.dot(r, s)*np.outer(s, s)/(np.dot(s, s)**2)
elif method == 'rank_one':
if np.linalg.norm(r) != 0:
self.hess_x += np.outer(r, r)/np.dot(r, s)
else:
raise NotImplementedError("Hessian update method '" + method + "' not implemented. Try 'exact', 'broyden', or 'rank_one'.")
def _m(self, f_x, s):
"""
Compute the value of the cubic approximation to f at the proposed next point
:param f_x: Value of f(x) at current point x
:param s: Proposed step to take
:return: Value of the cubic approximation to f at the proposed next point
"""
return f_x + s.dot(self.grad_x) + 0.5*s.dot(self.hess_x).dot(s) + 1/3*self.sigma*np.linalg.norm(s)**3
def _update_x_params(self, s, x_old, f_x):
"""
Update x, the function value, gradient, and Hessian at x, and sigma
:param s: Proposed step to take
:param x_old: Current point
:param f_x: Function value at current point
:return: x_new: Next point
"""
f_xnew = self.f(s+x_old)
rho = (f_x - f_xnew)/(f_x - self._m(f_x, s))
if rho >= self.eta1:
x_new = x_old + s
grad_x_old = self.grad_x
self.f_x = f_xnew
self.grad_x = self.gradient(x_new)
self._update_hess(x_new, grad_x_old, s, method=self.hessian_update_method)
# If a very successful iteration, decrease sigma
if rho > self.eta2:
self.sigma = max(min(self.sigma, np.linalg.norm(self.grad_x)), self.epsilon)
self.intermediate_points.append(x_new)
self.iter += 1
else:
x_new = x_old
self.sigma *= 2
return x_new
def adaptive_cubic_reg(self):
"""
Run the adaptive cubic regularization algorithm
:return: x_new: Final point
:return: self.intermediate_points: All points visited by the adaptive cubic regularization algorithm on the way to x_new
:return: self.iter: Number of iterations of adaptive cubic regularization
"""
converged = False
x_new = self.x0
f_xold = self.f(x_new)
fail = flag = 0
while self.iter < self.maxiter and converged is False:
x_old = x_new
aux_problem = _AuxiliaryProblem(x_old, self.grad_x, self.hess_x, 2*self.sigma, self.lambda_nplus, self.kappa_easy,
self.submaxiter)
s, flag = aux_problem.solve()
if flag == 0:
fail = 0
x_new = self._update_x_params(s, x_old, f_xold)
self.lambda_nplus, lambda_min = self._compute_lambda_nplus()
if np.linalg.norm(x_new-x_old) > 0:
converged = self._check_convergence(lambda_min, 2*self.sigma)
elif fail == 0:
# When flag != 0, the Hessian is probably wrong. Update to the exact Hessian.
# Don't enter this part if it fails twice in a row since this won't help.
fail = 1
self.hess_x = self.hessian(x_old)
self.lambda_nplus, lambda_min = self._compute_lambda_nplus()
else:
print('Convergence criteria not met, likely due to round-off error or ill-conditioned Hessian.')
return x_new, self.intermediate_points, self.iter, flag
return x_new, self.intermediate_points, self.iter, flag
class _AuxiliaryProblem:
"""
Solve the cubic subproblem as described in Conn et. al (2000) (see reference at top of file)
The notation in this function follows that of the above reference.
"""
def __init__(self, x, gradient, hessian, M, lambda_nplus, kappa_easy, submaxiter):
"""
:param x: Current location of cubic regularization algorithm
:param gradient: Gradient at current point
:param hessian: Hessian at current point
:param M: Current value used for M in cubic upper approximation to f at x_new
:param lambda_nplus: max(-1*smallest eigenvalue of hessian of f at x, 0)
:param kappa_easy: Convergence tolerance
"""
self.x = x
self.grad_x = gradient
self.hess_x = hessian
self.M = M
self.lambda_nplus = lambda_nplus
self.kappa_easy = kappa_easy
self.maxiter = submaxiter
# Function to compute H(x)+lambda*I as function of lambda
self.H_lambda = lambda lambduh: self.hess_x + lambduh*np.identity(np.size(self.hess_x, 0))
# Constant to add to lambda_nplus so that you're not at the zero where the eigenvalue is
self.lambda_const = (1+self.lambda_nplus)*np.sqrt(np.finfo(float).eps)
def _compute_s(self, lambduh):
"""
Compute L in H_lambda = LL^T and then solve LL^Ts = -g
:param lambduh: value for lambda in H_lambda
:return: s, L
"""
try:
# Numpy's Cholesky seems more numerically stable than scipy's Cholesky
L = np.linalg.cholesky(self.H_lambda(lambduh)).T
except:
# See p. 516 of Gould et al. (1999) (see reference at top of file)
self.lambda_const *= 2
try:
    # retry with the doubled shift and propagate its full (s, L, flag) result
    return self._compute_s(self.lambda_nplus + self.lambda_const)
except:
    return np.zeros_like(self.grad_x), [], 1
s = scipy.linalg.cho_solve((L, False), -self.grad_x)
return s, L, 0
def _update_lambda(self, lambduh, s, L):
"""
Update lambda by taking a Newton step
:param lambduh: Current value of lambda
:param s: Current value of -(H+lambda I)^(-1)g
:param L: Matrix L from Cholesky factorization of H_lambda
:return: lambduh - phi/phi_prime: Next value of lambda
"""
w = scipy.linalg.solve_triangular(L.T, s, lower=True)
norm_s = np.linalg.norm(s)
phi = 1/norm_s-self.M/(2*lambduh)
phi_prime = np.linalg.norm(w)**2/(norm_s**3)+self.M/(2*lambduh**2)
return lambduh - phi/phi_prime
def _converged(self, s, lambduh):
"""
Check whether the algorithm from the subproblem has converged
:param s: Current estimate of -(H+ lambda I)^(-1)g
:param lambduh: Current estimate of lambda := Mr/2
:return: True/False based on whether the convergence criterion has been met
"""
r = 2*lambduh/self.M
if abs(np.linalg.norm(s)-r) <= self.kappa_easy:
return True
else:
return False
def solve(self):
"""
Solve the cubic regularization subproblem. See algorithm 7.3.6 in Conn et al. (2000).
:return: s: Step for the cubic regularization algorithm
"""
if self.lambda_nplus == 0:
lambduh = 0
else:
lambduh = self.lambda_nplus + self.lambda_const
s, L, flag = self._compute_s(lambduh)
if flag != 0:
return s, flag
r = 2*lambduh/self.M
if np.linalg.norm(s) <= r:
if lambduh == 0 or np.linalg.norm(s) == r:
return s, 0
else:
Lambda, U = np.linalg.eigh(self.H_lambda(self.lambda_nplus))
s_cri = -U.T.dot(np.linalg.pinv(np.diag(Lambda))).dot(U).dot(self.grad_x)
alpha = max(np.roots([np.dot(U[:, 0], U[:, 0]), 2*np.dot(U[:, 0], s_cri), np.dot(s_cri, s_cri)-4*self.lambda_nplus**2/self.M**2]))
s = s_cri + alpha*U[:, 0]
return s, 0
if lambduh == 0:
lambduh += self.lambda_const
iter = 0
while not self._converged(s, lambduh) and iter < self.maxiter:
iter += 1
lambduh = self._update_lambda(lambduh, s, L)
s, L, flag = self._compute_s(lambduh)
if flag != 0:
return s, flag
if iter == self.maxiter:
print('Warning: Could not compute s: maximum number of iterations reached')
return s, 0
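# --- usage sketch (added for illustration, not part of the original file):
# minimize a simple quadratic bowl; gradient and Hessian are left as None so
# the finite-difference approximations above are exercised.
if __name__ == '__main__':
    f = lambda x: x[0] ** 2 + x[1] ** 2
    cr = CubicRegularization(np.array([1.0, 1.0]), f=f, conv_tol=1e-8)
    x_opt, intermediate_points, n_iter, flag = cr.cubic_reg()
    print(x_opt, n_iter, flag)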
| [
"[email protected]"
] | |
9c20160cd86ef2eab9e2d832cd876611b2e83109 | a5455dbb01687ab031f6347306dbb5ccc3c0c162 | /第一阶段/day17/day16_exercise/write_number_to_file.py | 996500d0100e6d34e65e6e9c88c3f4708a0c4fe4 | [] | no_license | zuobing1995/tiantianguoyuan | 9ff67aef6d916e27d92b63f812c96a6d5dbee6f8 | 29af861f5edf74a4a1a4156153678b226719c56d | refs/heads/master | 2022-11-22T06:50:13.818113 | 2018-11-06T04:52:53 | 2018-11-06T04:52:53 | 156,317,754 | 1 | 1 | null | 2022-11-22T01:06:37 | 2018-11-06T03:02:51 | Python | UTF-8 | Python | false | false | 614 | py | # write_number_to_file.py
# 1. Write a program that reads a series of integers from the user, stopping when a negative number is entered
# 1) Store the numbers in a list
# 2) Write the numbers in the list to the file numbers.txt
# (Hint: the integers must be converted to strings or byte strings before they can be written to a file)
L = []
while True:
    n = int(input("Enter an integer greater than 0: "))
if n < 0:
break
L.append(n)
print(L)
try:
    f = open('numbers.txt', 'w')  # open as a text file
for n in L:
        f.write(str(n))  # str() is required here; writing an int directly would raise an error
f.write('\n')
f.close()
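    # (a "with open('numbers.txt', 'w') as f:" block would close the file automatically, even on error)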
except OSError:
print("文件打开失败")
| [
"[email protected]"
] | |
d1b9cf401bc46cd2ceebba56153d629bd0c3b9d2 | ce196aba0adde47ea2767eae1d7983a1ef548bb8 | /T30_turtle-动画指针与小球.py | b195bd3e99eb61a4530e16476d239fafff47cc01 | [] | no_license | xiang-daode/Python3_codes | 5d2639ffd5d65065b98d029e79b8f3608a37cf0b | 06c64f85ce2c299aef7f9311e9473e0203a05b09 | refs/heads/main | 2023-08-30T14:59:55.123128 | 2021-11-03T05:12:24 | 2021-11-03T05:12:24 | 333,632,892 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 860 | py | from turtle import *
import math
def main():
ht()
while True:
for q in range(10,1800,1):
bgcolor(1,1,1)
clear(); tracer(False)
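            # tracer(False) turns off auto-redraw; update() below flushes the finished frame (manual double-buffering)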
a=3.1416*q/1800
xc=300*math.cos(a)
yc=300*math.sin(a)
width(5);color(0,0,0.5)
goto(0,0)
goto(xc,yc)
dot(45)
update()
for q in range(1800,3600,1):
clear(); tracer(False)
a=3.1416*q/1800
xc=300*math.cos(-a)
yc=300*math.sin(-a)
width(5);color(0,0.5,0)
pu()
goto(0,0)
goto(xc,yc)
pd()
dot(65)
update()
return "DONE!"
############## start the Main #################
main()
| [
"[email protected]"
] | |
44068c9ca7aa803a1aec9f9cb0b8c2a31bbe15a2 | 5196ecf41ac6a3de00d49c2039bbbe5efbd4ec03 | /examples/datachannel-vpn/vpn.py | b6135ef39c2de05deb39bd2245f0227fcd0f043b | [
"BSD-3-Clause"
] | permissive | ryogrid/aiortc-dc | 3962929432610dc2c1834636adc5f27ea8899b92 | c24ab64f9a7be66e22d8050fc01f376e55e6f24b | refs/heads/to-make-pip-package-from-master | 2020-04-29T20:34:19.367423 | 2019-05-23T21:32:00 | 2019-05-23T21:32:00 | 176,387,482 | 4 | 2 | BSD-3-Clause | 2019-05-21T23:45:06 | 2019-03-18T23:49:01 | Python | UTF-8 | Python | false | false | 2,758 | py | import argparse
import asyncio
import logging
import tuntap
from aiortc import RTCPeerConnection, RTCSessionDescription
from aiortc.contrib.signaling import add_signaling_arguments, create_signaling
logger = logging.Logger('vpn')
def channel_log(channel, t, message):
logger.info('channel(%s) %s %s' % (channel.label, t, repr(message)))
async def consume_signaling(pc, signaling):
while True:
obj = await signaling.receive()
if isinstance(obj, RTCSessionDescription):
await pc.setRemoteDescription(obj)
if obj.type == 'offer':
# send answer
await pc.setLocalDescription(await pc.createAnswer())
await signaling.send(pc.localDescription)
else:
print('Exiting')
break
def tun_start(tap, channel):
tap.open()
# relay channel -> tap
channel.on('message')(tap.fd.write)
# relay tap -> channel
def tun_reader():
data = tap.fd.read(tap.mtu)
if data:
channel.send(data)
loop = asyncio.get_event_loop()
loop.add_reader(tap.fd, tun_reader)
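    # invoke tun_reader whenever the TAP fd becomes readable, pushing each packet into the data channel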
tap.up()
async def run_answer(pc, signaling, tap):
await signaling.connect()
@pc.on('datachannel')
def on_datachannel(channel):
channel_log(channel, '-', 'created by remote party')
if channel.label == 'vpntap':
tun_start(tap, channel)
await consume_signaling(pc, signaling)
async def run_offer(pc, signaling, tap):
await signaling.connect()
channel = pc.createDataChannel('vpntap')
channel_log(channel, '-', 'created by local party')
@channel.on('open')
def on_open():
tun_start(tap, channel)
# send offer
await pc.setLocalDescription(await pc.createOffer())
await signaling.send(pc.localDescription)
await consume_signaling(pc, signaling)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='VPN over data channel')
parser.add_argument('role', choices=['offer', 'answer'])
parser.add_argument('--verbose', '-v', action='count')
add_signaling_arguments(parser)
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
tap = tuntap.Tun(name="revpn-%s" % args.role)
signaling = create_signaling(args)
pc = RTCPeerConnection()
if args.role == 'offer':
coro = run_offer(pc, signaling, tap)
else:
coro = run_answer(pc, signaling, tap)
# run event loop
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(coro)
except KeyboardInterrupt:
pass
finally:
loop.run_until_complete(pc.close())
loop.run_until_complete(signaling.close())
tap.close()
| [
"[email protected]"
] | |
c2240d2698bafc9c58c54fc94191d0b3c3b71c55 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_074/ch4_2020_03_23_18_35_39_527083.py | fb671b8ed7d25ebe3e4f2fa59642ebc4c3cf9696 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 120 | py | def classifica_idade(n):
    if n <= 11:
        return('crianca')
    elif 12 <= n <= 17:
        return('adolecente')
    else:
        return('adulto')
| [
"[email protected]"
] | |
e27f5cd1f09645d1899160104a78f4207462486e | 523e065a50ef3374b9e8ebe7033d6d17afc7aa90 | /midcli/gui/base/list/list.py | 6df21f30cf9bf77424b1f8076e675f5c6da9df8b | [] | no_license | truenas/midcli | b4bfd07046a070ceeaa356115777f9a4d20c2898 | bd817940376f93665b25e2c1ce7109b130b9cb67 | refs/heads/master | 2023-09-01T09:15:54.823487 | 2023-07-05T11:52:47 | 2023-07-05T11:52:47 | 184,088,159 | 15 | 3 | null | 2023-09-08T07:07:53 | 2019-04-29T14:37:41 | Python | UTF-8 | Python | false | false | 7,160 | py | # -*- coding=utf-8 -*-
import functools
import logging
import textwrap
from prompt_toolkit.application import get_app
from prompt_toolkit.filters import has_focus
from prompt_toolkit.key_binding.bindings.focus import focus_next, focus_previous
from prompt_toolkit.key_binding.key_bindings import KeyBindings
from prompt_toolkit.layout.containers import (
AnyContainer,
HSplit,
)
from prompt_toolkit.shortcuts import yes_no_dialog
from prompt_toolkit.shortcuts.dialogs import _create_app
from prompt_toolkit.widgets import Label
from prompt_toolkit.widgets.base import Box, Frame, Shadow
from midcli.display_mode.mode.text_mixin import TextMixin
from midcli.gui.base.app import AppResult
from midcli.gui.base.common.menu_item import MenuItem
logger = logging.getLogger(__name__)
__all__ = ["List"]
class List:
title = NotImplemented
item_name = NotImplemented
item_title_key = NotImplemented
service = NotImplemented
primary_key = "id"
columns = NotImplemented
columns_processors = {}
create_class = None
update_class = NotImplemented
deletable = True
def __init__(self, context):
self.context = context
with context.get_client() as c:
self.data = c.call(f"{self.service}.query")
self.kb = KeyBindings()
actions = []
if self.data:
actions.append(f"<Enter> to edit a {self.item_name}")
if self.deletable:
actions.append(f"<Delete> to delete a {self.item_name}")
self.kb.add("delete")(self._delete_handler)
if self.create_class:
actions.append(f"<n> to create a new {self.item_name}")
self.kb.add("n")(
lambda event: event.app.exit(
self.create_class(
self.context,
AppResult(app_factory=lambda: self.__class__(self.context))
)
)
)
actions.append(f"<r> to refresh")
self.kb.add("r")(lambda event: event.app.exit(AppResult(app_factory=lambda: self.__class__(self.context))))
actions.append(f"<q> to quit")
self.kb.add("q")(lambda event: event.app.exit(None))
help_label = Label("\n" + "\n".join(textwrap.wrap(f"Press {', '.join(actions)}.", width=60)))
if self.data:
header, rows, footer = self._draw(self.data)
header_label = Label(header)
self.row_labels = [
MenuItem(row, handler=functools.partial(self._edit_handler, self.data[i]))
for i, row in enumerate(rows)
]
footer_label = Label(footer)
inputs_kb = KeyBindings()
first_input_selected = has_focus(self.row_labels[0])
last_input_selected = has_focus(self.row_labels[-1])
inputs_kb.add("up", filter=first_input_selected)(lambda event: event.app.layout.focus(self.row_labels[-1]))
inputs_kb.add("up", filter=~first_input_selected)(focus_previous)
inputs_kb.add("down", filter=last_input_selected)(lambda event: event.app.layout.focus(self.row_labels[0]))
inputs_kb.add("down", filter=~last_input_selected)(focus_next)
self.no_rows_label = None
widgets = [header_label] + self.row_labels + [footer_label]
else:
self.row_labels = []
inputs_kb = None
self.no_rows_label = Label(f"No {self.item_name} found.")
widgets = [self.no_rows_label]
self.hsplit = HSplit(widgets + [help_label], padding=0, key_bindings=inputs_kb)
frame_body = HSplit(
[
Box(
body=self.hsplit,
),
]
)
frame = Shadow(
body=Frame(
title=lambda: self.title,
body=frame_body,
style="class:dialog.body",
width=None,
key_bindings=self.kb,
modal=True,
)
)
self.container = Box(body=frame, style="class:dialog", width=None)
self.app = None
def __pt_container__(self) -> AnyContainer:
return self.container
def run(self):
self.app = _create_app(self, None)
self._setup_app()
if self.no_rows_label:
self.app.layout.focus(self.no_rows_label)
return self.app.run()
def _draw(self, data):
col_width = [len(col) for col in self.columns]
rows = []
row_line_count = []
for item in data:
row = []
line_count = 1
for i, col in enumerate(self.columns):
if col in self.columns_processors:
val = self.columns_processors[col](item)
else:
val = item
for k in col.split("."):
val = val[k]
val = TextMixin().value_to_text(val)
lines = val.split("\n")
row.append(lines)
col_width[i] = max(col_width[i], max(map(len, lines)))
line_count = max(line_count, len(lines))
rows.append(row)
row_line_count.append(line_count)
border = "".join(f"+{''.rjust(width + 2, '-')}" for col, width in zip(self.columns, col_width)) + "+"
header = (
f"{border}\n" +
"".join(f"| {col.rjust(width)} " for col, width in zip(self.columns, col_width)) + "|\n" +
border
)
rendered_rows = []
for row, line_count in zip(rows, row_line_count):
rendered_row = [""] * line_count
for i in range(line_count):
for j, (col, width) in enumerate(zip(self.columns, col_width)):
rendered_row[i] += f"| {(row[j][i] if i < len(row[j]) else '').rjust(width)} "
rendered_row[i] += "|"
rendered_rows.append("\n".join(rendered_row))
footer = border
return header, rendered_rows, footer
def _edit_handler(self, row):
get_app().exit(
self.update_class(
self.context,
AppResult(app_factory=lambda: self.__class__(self.context)),
data=row
)
)
def _delete_handler(self, event):
if row := self._focused_row():
def handler(sure):
if sure:
with self.context.get_client() as c:
c.call(f"{self.service}.delete", row[self.primary_key])
return self.__class__(self.context)
event.app.exit(AppResult(
app=yes_no_dialog(
f"Delete {self.item_name}",
f"Are you sure want to delete {self.item_name} {row[self.item_title_key]!r}?"
),
app_result_handler=handler,
))
def _focused_row(self):
for row, label in zip(self.data, self.row_labels):
if get_app().layout.has_focus(label):
return row
def _setup_app(self):
pass
| [
"[email protected]"
] | |
0135ac0170c8b4311a61bcfd4c1bd13307f3af69 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnpalmat.py | a19651f736df66a223d19f2b9213090be6cf71a2 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 64 | py | ii = [('WestJIT2.py', 5), ('KirbWPW2.py', 2), ('WestJIT.py', 6)] | [
"[email protected]"
] | |
b8ab6496d1c85768d4debbb81e2986d0b25f5fc8 | eb3683f9127befb9ef96d8eb801206cf7b84d6a7 | /stypy/invokation/type_rules/modules/numpy/core/umath/umath__type_modifiers.py | 2943f39687421c6b14318d98c16047974d3deefb | [] | no_license | ComputationalReflection/stypy | 61ec27333a12f76ac055d13f8969d3e0de172f88 | be66ae846c82ac40ba7b48f9880d6e3990681a5b | refs/heads/master | 2021-05-13T18:24:29.005894 | 2018-06-14T15:42:50 | 2018-06-14T15:42:50 | 116,855,812 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 643 | py | import types
from stypy.type_inference_programs.stypy_interface import get_builtin_python_type_instance
from stypy.types import union_type
from stypy.types.type_containers import set_contained_elements_type
class TypeModifiers:
@staticmethod
def geterrobj(localization, proxy_obj, arguments):
ret_type = get_builtin_python_type_instance(localization, 'list')
set_contained_elements_type(ret_type,
union_type.UnionType.add(get_builtin_python_type_instance(localization, 'int'),
types.NoneType))
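        # models numpy.geterrobj(), whose returned list holds ints plus a possibly-None error handler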
return ret_type
| [
"[email protected]"
] | |
7714cc70bd0ac485172a72e223ea11bbb5d361f5 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/arc003/A/4821339.py | 0b037c5820bf4f67deb5eafaa1098e8dd9a75a33 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | N = int(input())
r = input()
ans = r.count('A') * 4 + r.count('B') * 3 + r.count('C') * 2 + r.count('D')
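# GPA = (4*A + 3*B + 2*C + 1*D + 0*F) / N; an F is worth nothing, hence no term for it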
print(ans / N) | [
"[email protected]"
] | |
f0c2549e125bf1fc1263406b679733026ab1e746 | a31de016611f3b4efc7a576e7113cad1a738419b | /2017/pythonchallenge.com/4_dont_try_all_nothings.py | 4dbaf632f5fbd4e2db39720b99703034e39fb8a6 | [] | no_license | Ing-Josef-Klotzner/python | 9d4044d632672fff966b28ab80e1ef77763c78f5 | 3913729d7d6e1b7ac72b46db7b06ca0c58c8a608 | refs/heads/master | 2022-12-09T01:40:52.275592 | 2022-12-01T22:46:43 | 2022-12-01T22:46:43 | 189,040,355 | 0 | 0 | null | 2022-12-01T19:52:37 | 2019-05-28T14:05:16 | Python | UTF-8 | Python | false | false | 1,901 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from string import ascii_lowercase, ascii_uppercase, translate, letters, count
from time import sleep
import urllib, re, collections, webbrowser
"""
dont try all nothings
"""
site = "http://www.pythonchallenge.com/pc/def/"
opt = "linkedlist.php?nothing="
html_string = ""
answer_to_find = ""
def get_next_nothing(nothing):
reduce_count = 1
do_not_reduce_count = 0
try:
html_string = urllib.urlopen(site + opt + nothing).read()
#html_lines = urllib.urlopen('http://www.pythonchallenge.com/pc/def/equality.html').readlines()
if "next nothing is" in html_string:
return html_string, None, "".join(re.findall('next nothing is ([0-9]*)$', html_string)), do_not_reduce_count
elif "Divide" in html_string:
return html_string, None, str(int(nothing)//2), do_not_reduce_count
else:
return html_string, html_string, "", do_not_reduce_count
except IOError:
print("")
print("the server pythonchallenge can not be reached. Will try again ...")
return "try again in 3 seconds", None , nothing, reduce_count
sleep(3)
nothing = "72758" # this you get when calling first just with php?nothing
nothing = "12345" # this is found in sourcecode when called with php?nothing
count = 0
while count < 280: # reduced from 400 after seen answer is on position 270
html_string, answer_found, nothing, count_reduce = get_next_nothing(nothing)
if answer_found:
answer_to_find = answer_found
answer_count = count
count += 1 - count_reduce
print(str(count) + " " + html_string + " (number: " + (nothing if nothing != "" else "") + ")")
print("")
print("The " + str(answer_count) + " answer to find is: " + answer_to_find)
webbrowser.open(site + answer_to_find)
| [
"[email protected]"
] | |
b7cb69a7ae3d807f31c9d484749aac4d7cde739f | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_Class1493.py | 2e57b83d0b955320875def0b833d775971d43704 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,509 | py | # qubit number=5
# total number=57
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[0]) # number=38
prog.cz(input_qubit[1],input_qubit[0]) # number=39
prog.h(input_qubit[0]) # number=40
prog.h(input_qubit[0]) # number=51
prog.cz(input_qubit[1],input_qubit[0]) # number=52
prog.h(input_qubit[0]) # number=53
prog.cx(input_qubit[1],input_qubit[0]) # number=54
prog.z(input_qubit[1]) # number=55
prog.cx(input_qubit[1],input_qubit[0]) # number=56
prog.cx(input_qubit[1],input_qubit[0]) # number=50
prog.h(input_qubit[0]) # number=32
prog.cz(input_qubit[1],input_qubit[0]) # number=33
prog.h(input_qubit[0]) # number=34
prog.h(input_qubit[4]) # number=21
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
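    # standard Grover schedule: about (pi/4)*sqrt(2**n) oracle+diffusion rounds amplify the marked state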
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.cx(input_qubit[3],input_qubit[0]) # number=41
prog.z(input_qubit[3]) # number=42
prog.cx(input_qubit[3],input_qubit[0]) # number=43
prog.cx(input_qubit[1],input_qubit[3]) # number=44
prog.cx(input_qubit[3],input_qubit[2]) # number=45
prog.x(input_qubit[0]) # number=9
prog.x(input_qubit[1]) # number=10
prog.x(input_qubit[2]) # number=11
prog.cx(input_qubit[0],input_qubit[3]) # number=35
prog.x(input_qubit[3]) # number=36
prog.cx(input_qubit[0],input_qubit[3]) # number=37
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.cx(input_qubit[1],input_qubit[0]) # number=24
prog.x(input_qubit[0]) # number=25
prog.cx(input_qubit[1],input_qubit[0]) # number=26
prog.x(input_qubit[1]) # number=14
prog.x(input_qubit[2]) # number=15
prog.x(input_qubit[3]) # number=16
prog.x(input_qubit[3]) # number=46
prog.y(input_qubit[1]) # number=47
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
prog.x(input_qubit[1]) # number=22
prog.x(input_qubit[1]) # number=23
# circuit end
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
backend = BasicAer.get_backend('statevector_simulator')
sample_shot =7924
info = execute(prog, backend=backend).result().get_statevector()
qubits = round(log2(len(info)))
info = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_Class1493.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| [
"[email protected]"
] | |
5ce9115941f2c1b133e442ff12e7cdd880d104d2 | 0f7610f1aeee514106be411bca3cb47b7a8ae0c1 | /python3_cron_scripts/fetch_azure_dns.py | 2a0f01bbdb1d649c1b7299bd47b4def26164e916 | [
"Apache-2.0"
] | permissive | Righard/Marinus | 5b0b42dfaaea06dd6e5461e090009c762b0ae36f | 0cdd2f09d06dca39f67e0be23eafd323e987a168 | refs/heads/master | 2020-04-17T05:38:03.142052 | 2019-01-16T21:50:10 | 2019-01-16T21:50:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,326 | py | #!/usr/bin/python3
# Copyright 2018 Adobe. All rights reserved.
# This file is licensed to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
This script is for Azure customers who use Azure DNS and have credentials to the service.
The script will iterate through all zones for the configured subscription ID / tenant ID.
It will insert the identified public records using the source of "azure:" + resourceGroups.
This script is based on the Azure Python SDK:
https://docs.microsoft.com/en-us/python/api/azure-mgmt-dns/azure.mgmt.dns?view=azure-python
"""
from datetime import datetime
from azure.mgmt.dns.models import ZoneType
from libs3 import ZoneIngestor, AzureConnector, DNSManager, MongoConnector
from libs3.ZoneManager import ZoneManager
def split_id(url_id):
"""
Data for the response is encoded in the ID URL
"""
parts = url_id.split("/")
data = {}
for i in range(1,len(parts)-1,2):
data[parts[i]] = parts[i + 1]
return data
def process_soa_record(entry):
"""
Convert the Azure SOA record object into Marinus information
"""
soa = entry.soa_record
value = soa.host[:-1]
value += " " + soa.email
value += " " + str(soa.serial_number)
value += " " + str(soa.refresh_time)
value += " " + str(soa.retry_time)
value += " " + str(soa.expire_time)
value += " " + str(soa.minimum_ttl)
print ("SOA: " + value)
results = []
results.append({'fqdn': entry.fqdn[:-1], 'type': 'soa', 'value': value})
return results
def process_arecords(entry):
"""
Convert the Azure A record object into Marinus information
"""
results = []
for arecord in entry.arecords:
print("A: " + entry.fqdn[:-1] + " : " + arecord.ipv4_address)
results.append({'fqdn': entry.fqdn[:-1], 'type': 'a', 'value': arecord.ipv4_address})
return results
def process_ns_records(entry):
"""
Convert the Azure NS record object into Marinus information
"""
results = []
for ns_record in entry.ns_records:
print("NS: " + entry.fqdn[:-1] + " : " + ns_record.nsdname)
results.append({'fqdn': entry.fqdn[:-1], 'type': 'ns', 'value': ns_record.nsdname[:-1]})
return results
def process_mx_records(entry):
"""
Convert the Azure MX record object into Marinus information
"""
results = []
for mx_record in entry.mx_records:
value = str(mx_record.preference) + " " + mx_record.exchange
print("MX: " + entry.fqdn[:-1] + " : " + value)
results.append({'fqdn': entry.fqdn[:-1], 'type': 'mx', 'value': value})
return results
def process_cname_record(entry):
"""
Convert the Azure CNAME record object into Marinus information
"""
print("CNAME: " + entry.fqdn[:-1] + " : " + entry.cname_record.cname)
results = []
results.append({'fqdn': entry.fqdn[:-1], 'type': 'cname', 'value': entry.cname_record.cname})
return results
def process_aaaa_records(entry):
"""
Convert the Azure AAAA record object into Marinus information
"""
results = []
for aaaa_record in entry.aaaa_records:
print("AAAA: " + entry.fqdn[:-1] + " : " + aaaa_record.ipv6_address)
results.append({'fqdn': entry.fqdn[:-1], 'type': 'aaaa', 'value': aaaa_record.ipv6_address})
return results
def process_txt_records(entry):
"""
Convert the Azure TXT record object into Marinus information
"""
results = []
for txt_record in entry.txt_records:
text_value = ""
for txt in txt_record.value:
text_value += txt
print("TXT: " + entry.fqdn[:-1] + " : " + text_value)
results.append({'fqdn': entry.fqdn[:-1], 'type': 'txt', 'value': text_value})
return results
def process_ptr_records(entry):
"""
Convert the Azure PTR record object into Marinus information
"""
results = []
for ptr_record in entry.ptr_records:
print("PTR: " + entry.fqdn + " : " + ptr_record.ptrdname)
results.append({'fqdn': entry.fqdn[:-1], 'type': 'ptr', 'value': ptr_record.ptrdname})
return results
def process_srv_records(entry):
"""
Convert the Azure SRV record object into Marinus information
"""
results = []
for srv_record in entry.srv_records:
value = str(srv_record.priority) + " " + str(srv_record.weight) + " " + str(srv_record.port) + " " + srv_record.target
print("SRV: " + value)
results.append({'fqdn': entry.fqdn[:-1], 'type': 'srv', 'value': value})
return results
def extract_record_set_value(field, entry):
"""
    Call the appropriate function for the given field type.
"""
if field == 'A':
# The missing underscore is intentional. MS was inconsistent.
return process_arecords(entry)
elif field == 'AAAA':
return process_aaaa_records(entry)
elif field == 'MX':
return process_mx_records(entry)
elif field == 'NS':
return process_ns_records(entry)
elif field == 'PTR':
return process_ptr_records(entry)
elif field == 'SRV':
return process_srv_records(entry)
elif field == 'TXT':
return process_txt_records(entry)
elif field == 'CNAME':
return process_cname_record(entry)
elif field == 'SOA':
return process_soa_record(entry)
else:
print("Unknown Record Set Type")
def main():
now = datetime.now()
print ("Starting: " + str(now))
azure_connector = AzureConnector.AzureConnector()
mongo_connector = MongoConnector.MongoConnector()
dns_manager = DNSManager.DNSManager(mongo_connector)
zone_ingestor = ZoneIngestor.ZoneIngestor()
current_zones = ZoneManager.get_distinct_zones(mongo_connector)
resource_client = azure_connector.get_resources_client()
resources = []
# The resource list is not currently used.
for item in resource_client.resource_groups.list():
resources.append(item.name)
dns_client = azure_connector.get_dns_client()
zones = dns_client.zones.list()
# The type of records the Azure DNS will let you configure
record_types = {'A': 'arecords',
'AAAA': 'aaaa_records',
'MX': 'mx_records',
'NS': 'ns_records',
'PTR': 'ptr_records',
'SRV': 'srv_records',
'TXT': 'txt_records',
'CNAME': 'cname_record',
'SOA': 'soa_record'}
for zone in zones:
print("Zone: " + zone.name)
data = split_id(zone.id)
if zone.zone_type == ZoneType.public:
print (zone.name + " is public:")
if zone.name not in current_zones:
print("Creating zone: " + zone.name)
zone_ingestor.add_zone(zone.name, "azure:" + data["resourceGroups"])
try:
print("ResourceGroup: " + data["resourceGroups"])
records = dns_client.record_sets.list_all_by_dns_zone(data["resourceGroups"], zone.name)
for entry in records:
# The record_data id value ends in rtype/rvalue so you must guess the rtype
record_data = split_id(entry.id)
for rtype in record_types:
if rtype in record_data:
results = extract_record_set_value(rtype, entry)
for result in results:
result['zone'] = zone.name
result['created'] = datetime.now()
result['status'] = 'confirmed'
dns_manager.insert_record(result, "azure:" + data["resourceGroups"])
except:
print("No records found")
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
9acdaaea15148bfe4482d27f69f0539f3ca6fafc | 486fb5b62c04207effe2ee44c7548b68c56ef53f | /train.py | c1a85ab5be6e7e40d4749a1878cf8a7f0b265570 | [
"MIT"
] | permissive | yjxiao/self-critical.pytorch | b3beafd385fc812fdb027d6e19fa9a11d61e0dcf | 17ce92054d73ef17feffc1794d9986977a3d7c51 | refs/heads/master | 2021-01-04T15:42:21.232518 | 2020-02-27T23:48:57 | 2020-02-27T23:48:57 | 240,619,436 | 1 | 0 | null | 2020-02-15T00:01:28 | 2020-02-15T00:01:27 | null | UTF-8 | Python | false | false | 11,934 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import time
import os
from six.moves import cPickle
import traceback
from collections import defaultdict
import opts
import models
from dataloader import *
import skimage.io
import eval_utils
import misc.utils as utils
from misc.rewards import init_scorer, get_self_critical_reward
from misc.loss_wrapper import LossWrapper
def add_summary_value(writer, key, value, iteration):
if writer:
writer.add_scalar(key, value, iteration)
def train(opt):
################################
# Build dataloader
################################
loader = DataLoader(opt)
opt.vocab_size = loader.vocab_size
opt.seq_length = loader.seq_length
##########################
# Initialize infos
##########################
infos = {
'iter': 0,
'epoch': 0,
'loader_state_dict': None,
'vocab': loader.get_vocab(),
}
# Load old infos(if there is) and check if models are compatible
if opt.start_from is not None and os.path.isfile(os.path.join(opt.start_from, 'infos_'+opt.id+'.pkl')):
with open(os.path.join(opt.start_from, 'infos_'+opt.id+'.pkl'), 'rb') as f:
infos = utils.pickle_load(f)
saved_model_opt = infos['opt']
need_be_same=["caption_model", "rnn_type", "rnn_size", "num_layers"]
for checkme in need_be_same:
assert getattr(saved_model_opt, checkme) == getattr(opt, checkme), "Command line argument and saved model disagree on '%s' " % checkme
infos['opt'] = opt
#########################
# Build logger
#########################
# naive dict logger
histories = defaultdict(dict)
if opt.start_from is not None and os.path.isfile(os.path.join(opt.start_from, 'histories_'+opt.id+'.pkl')):
with open(os.path.join(opt.start_from, 'histories_'+opt.id+'.pkl'), 'rb') as f:
histories.update(utils.pickle_load(f))
# tensorboard logger
tb_summary_writer = SummaryWriter(opt.checkpoint_path)
##########################
# Build model
##########################
opt.vocab = loader.get_vocab()
model = models.setup(opt).cuda()
del opt.vocab
# Load pretrained weights:
if opt.start_from is not None and os.path.isfile(os.path.join(opt.start_from, 'model.pth')):
model.load_state_dict(torch.load(os.path.join(opt.start_from, 'model.pth')))
# Wrap generation model with loss function(used for training)
# This allows loss function computed separately on each machine
lw_model = LossWrapper(model, opt)
# Wrap with dataparallel
dp_model = torch.nn.DataParallel(model)
dp_lw_model = torch.nn.DataParallel(lw_model)
##########################
# Build optimizer
##########################
if opt.noamopt:
assert opt.caption_model == 'transformer', 'noamopt can only work with transformer'
optimizer = utils.get_std_opt(model, factor=opt.noamopt_factor, warmup=opt.noamopt_warmup)
elif opt.reduce_on_plateau:
optimizer = utils.build_optimizer(model.parameters(), opt)
optimizer = utils.ReduceLROnPlateau(optimizer, factor=0.5, patience=3)
else:
optimizer = utils.build_optimizer(model.parameters(), opt)
# Load the optimizer
if opt.start_from is not None and os.path.isfile(os.path.join(opt.start_from,"optimizer.pth")):
optimizer.load_state_dict(torch.load(os.path.join(opt.start_from, 'optimizer.pth')))
#########################
# Get ready to start
#########################
iteration = infos['iter']
epoch = infos['epoch']
# For back compatibility
if 'iterators' in infos:
infos['loader_state_dict'] = {split: {'index_list': infos['split_ix'][split], 'iter_counter': infos['iterators'][split]} for split in ['train', 'val', 'test']}
loader.load_state_dict(infos['loader_state_dict'])
if opt.load_best_score == 1:
best_val_score = infos.get('best_val_score', None)
if opt.noamopt:
optimizer._step = iteration
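        # resume the Noam warmup/decay schedule from the checkpointed iteration count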
# flag indicating finish of an epoch
    # Always set to True at the beginning to initialize the lr, etc.
epoch_done = True
    # Ensure the model is in training mode
dp_lw_model.train()
# Start training
try:
while True:
if epoch_done:
if not opt.noamopt and not opt.reduce_on_plateau:
# Assign the learning rate
if epoch > opt.learning_rate_decay_start and opt.learning_rate_decay_start >= 0:
frac = (epoch - opt.learning_rate_decay_start) // opt.learning_rate_decay_every
decay_factor = opt.learning_rate_decay_rate ** frac
opt.current_lr = opt.learning_rate * decay_factor
else:
opt.current_lr = opt.learning_rate
utils.set_lr(optimizer, opt.current_lr) # set the decayed rate
# Assign the scheduled sampling prob
if epoch > opt.scheduled_sampling_start and opt.scheduled_sampling_start >= 0:
frac = (epoch - opt.scheduled_sampling_start) // opt.scheduled_sampling_increase_every
opt.ss_prob = min(opt.scheduled_sampling_increase_prob * frac, opt.scheduled_sampling_max_prob)
model.ss_prob = opt.ss_prob
# If start self critical training
if opt.self_critical_after != -1 and epoch >= opt.self_critical_after:
sc_flag = True
init_scorer(opt.cached_tokens)
else:
sc_flag = False
# If start structure loss training
if opt.structure_after != -1 and epoch >= opt.structure_after:
struc_flag = True
init_scorer(opt.cached_tokens)
else:
struc_flag = False
epoch_done = False
start = time.time()
# Load data from train split (0)
data = loader.get_batch('train')
print('Read data:', time.time() - start)
torch.cuda.synchronize()
start = time.time()
tmp = [data['fc_feats'], data['att_feats'], data['labels'], data['masks'], data['att_masks']]
tmp = [_ if _ is None else _.cuda() for _ in tmp]
fc_feats, att_feats, labels, masks, att_masks = tmp
optimizer.zero_grad()
model_out = dp_lw_model(fc_feats, att_feats, labels, masks, att_masks, data['gts'], torch.arange(0, len(data['gts'])), sc_flag, struc_flag)
loss = model_out['loss'].mean()
loss.backward()
utils.clip_gradient(optimizer, opt.grad_clip)
optimizer.step()
train_loss = loss.item()
torch.cuda.synchronize()
end = time.time()
if struc_flag:
print("iter {} (epoch {}), train_loss = {:.3f}, lm_loss = {:.3f}, struc_loss = {:.3f}, time/batch = {:.3f}" \
.format(iteration, epoch, train_loss, model_out['lm_loss'].mean().item(), model_out['struc_loss'].mean().item(), end - start))
elif not sc_flag:
print("iter {} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}" \
.format(iteration, epoch, train_loss, end - start))
else:
print("iter {} (epoch {}), avg_reward = {:.3f}, time/batch = {:.3f}" \
.format(iteration, epoch, model_out['reward'].mean(), end - start))
# Update the iteration and epoch
iteration += 1
if data['bounds']['wrapped']:
epoch += 1
epoch_done = True
# Write the training loss summary
if (iteration % opt.losses_log_every == 0):
tb_summary_writer.add_scalar('train_loss', train_loss, iteration)
if opt.noamopt:
opt.current_lr = optimizer.rate()
elif opt.reduce_on_plateau:
opt.current_lr = optimizer.current_lr
tb_summary_writer.add_scalar('learning_rate', opt.current_lr, iteration)
tb_summary_writer.add_scalar('scheduled_sampling_prob', model.ss_prob, iteration)
if sc_flag:
tb_summary_writer.add_scalar('avg_reward', model_out['reward'].mean(), iteration)
elif struc_flag:
tb_summary_writer.add_scalar('lm_loss', model_out['lm_loss'].mean().item(), iteration)
tb_summary_writer.add_scalar('struc_loss', model_out['struc_loss'].mean().item(), iteration)
tb_summary_writer.add_scalar('reward', model_out['reward'].mean().item(), iteration)
histories['loss_history'][iteration] = train_loss if not sc_flag else model_out['reward'].mean()
histories['lr_history'][iteration] = opt.current_lr
histories['ss_prob_history'][iteration] = model.ss_prob
# update infos
infos['iter'] = iteration
infos['epoch'] = epoch
infos['loader_state_dict'] = loader.state_dict()
# make evaluation on validation set, and save model
if (iteration % opt.save_checkpoint_every == 0):
# eval model
eval_kwargs = {'split': 'val',
'dataset': opt.input_json}
eval_kwargs.update(vars(opt))
val_loss, predictions, lang_stats = eval_utils.eval_split(
dp_model, lw_model.crit, loader, eval_kwargs)
if opt.reduce_on_plateau:
if 'CIDEr' in lang_stats:
optimizer.scheduler_step(-lang_stats['CIDEr'])
else:
optimizer.scheduler_step(val_loss)
# Write validation result into summary
tb_summary_writer.add_scalar('validation loss', val_loss, iteration)
if lang_stats is not None:
for k,v in lang_stats.items():
tb_summary_writer.add_scalar(k, v, iteration)
histories['val_result_history'][iteration] = {'loss': val_loss, 'lang_stats': lang_stats, 'predictions': predictions}
# Save model if is improving on validation result
if opt.language_eval == 1:
current_score = lang_stats['CIDEr']
else:
current_score = - val_loss
best_flag = False
if best_val_score is None or current_score > best_val_score:
best_val_score = current_score
best_flag = True
# Dump miscalleous informations
infos['best_val_score'] = best_val_score
utils.save_checkpoint(opt, model, infos, optimizer, histories)
if opt.save_history_ckpt:
utils.save_checkpoint(opt, model, infos, optimizer, append=str(iteration))
if best_flag:
utils.save_checkpoint(opt, model, infos, optimizer, append='best')
# Stop if reaching max epochs
if epoch >= opt.max_epochs and opt.max_epochs != -1:
break
except (RuntimeError, KeyboardInterrupt):
print('Save ckpt on exception ...')
utils.save_checkpoint(opt, model, infos, optimizer)
print('Save ckpt done.')
stack_trace = traceback.format_exc()
print(stack_trace)
opt = opts.parse_opt()
train(opt)
| [
"[email protected]"
] | |
aec83eb1a9844abfcc246087f28775decbcc16bf | 2352bc07e12b0256913559cf3485a360569ccd5e | /Practice/code_class/Crossin-practices/python_weekly_question/prefect_int.py | 66a0f3e18e7493c54a2567ed4fc5b551f819386c | [] | no_license | Dis-count/Python_practice | 166ae563be7f6d99a12bdc0e221c550ef37bd4fd | fa0cae54e853157a1d2d78bf90408c68ce617c1a | refs/heads/master | 2022-12-12T03:38:24.091529 | 2021-12-22T09:51:59 | 2021-12-22T09:51:59 | 224,171,833 | 2 | 1 | null | 2022-12-08T05:29:38 | 2019-11-26T11:07:00 | Jupyter Notebook | UTF-8 | Python | false | false | 452 | py | '''
Perfect numbers -- here meaning integers whose only prime factors are 2, 3 and 5
'''
# Check whether n is such a "perfect" number
def perfect_word(n):
if n == 1:
return True
for i in [2,3,5]:
# print(type((n/i)*10))
if n//i == n/i:
return perfect_word(n//i)
return False
# Loop until the n-th such number is found
def main(n):
count = 0
inc_int = 0
while count < n:
inc_int += 1
if perfect_word(inc_int):
# print(inc_int)
count += 1
return inc_int
print(main(11))
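# prints 15: the 11th number whose only prime factors are 2, 3 or 5 (1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15)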
| [
"[email protected]"
] | |
2e9d56ceb34ad243c718ee39f9acc697ea766881 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | Python_codes/p04011/s000567227.py | ba35f79c0dda056eff6b44bb617f6c61353db7f0 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 522 | py | # Compute the total accommodation fee.
days_of_stay = int(input())
days_of_original_price = int(input())
original_price = int(input())
discount_price = int(input())
# The first branch applies no discount; the second applies the discounted rate for the extra nights.
if days_of_stay <= days_of_original_price:
total_fee = days_of_stay * original_price
print(total_fee)
elif days_of_stay > days_of_original_price:
total_fee = original_price * days_of_original_price + (days_of_stay - days_of_original_price) * discount_price
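    # e.g. N=5 nights, K=3 at X=10000, rest at Y=9000 -> 3*10000 + 2*9000 = 48000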
print(total_fee) | [
"[email protected]"
] | |
28dcbbee9c09173377879aabf5f45bc692c5d6be | a06fd6b7b4e5fc2b1b5a46b4edd20a11f717a5ea | /netbox/extras/webhooks.py | 12dc7558b6fd9940c1c07120914806db4bd16a00 | [
"Apache-2.0"
] | permissive | feiynagly/netbox | d9be722eaa5021cf39e82c19c3e4562dedd94254 | d364bbbaa6ee4f2a19015d07dd0de855628befb4 | refs/heads/master | 2022-12-04T04:41:29.052349 | 2021-05-11T07:13:56 | 2021-05-11T07:13:56 | 173,664,986 | 1 | 1 | Apache-2.0 | 2022-11-22T03:12:55 | 2019-03-04T03:10:07 | Python | UTF-8 | Python | false | false | 1,881 | py | import datetime
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from extras.constants import OBJECTCHANGE_ACTION_CREATE, OBJECTCHANGE_ACTION_DELETE, OBJECTCHANGE_ACTION_UPDATE
from extras.models import Webhook
from utilities.api import get_serializer_for_model
from .constants import WEBHOOK_MODELS
def enqueue_webhooks(instance, action):
"""
Find Webhook(s) assigned to this instance + action and enqueue them
to be processed
"""
if not settings.WEBHOOKS_ENABLED or instance._meta.model_name not in WEBHOOK_MODELS:
return
# Retrieve any applicable Webhooks
action_flag = {
OBJECTCHANGE_ACTION_CREATE: 'type_create',
OBJECTCHANGE_ACTION_UPDATE: 'type_update',
OBJECTCHANGE_ACTION_DELETE: 'type_delete',
}[action]
obj_type = ContentType.objects.get_for_model(instance.__class__)
webhooks = Webhook.objects.filter(obj_type=obj_type, enabled=True, **{action_flag: True})
if webhooks.exists():
# Get the Model's API serializer class and serialize the object
serializer_class = get_serializer_for_model(instance.__class__)
serializer_context = {
'request': None,
}
serializer = serializer_class(instance, context=serializer_context)
# We must only import django_rq if the Webhooks feature is enabled.
        # Only if we have gotten to this point is the feature enabled
from django_rq import get_queue
webhook_queue = get_queue('default')
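        # the enqueued jobs are executed asynchronously by an rqworker process listening on this queue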
# enqueue the webhooks:
for webhook in webhooks:
webhook_queue.enqueue(
"extras.webhooks_worker.process_webhook",
webhook,
serializer.data,
instance._meta.model_name,
action,
str(datetime.datetime.now())
)
| [
"[email protected]"
] | |
0fb06c15b5cc0813a56b72ced21962f7979cd904 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_76/834.py | 55ec732bc4c284b9f4cf85b24a542c34cacbd5d2 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,066 | py | f = open('C-small-attempt1.in')
of = open('C-small-attempt1.out', 'w')
import itertools
def xor(x, y):
return x^y
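# Candy Splitting: Patrick adds without carrying (bitwise XOR), so a split looks fair to him
# iff both piles have equal XOR; Sean then keeps whichever pile has the larger ordinary sum.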
T = int(f.readline())
for i in range(T):
N = int(f.readline())
sean = 0
candies = [int(x) for x in f.readline().split(' ')]
for j in candies:
rest = candies[:]
rest.remove(j)
if j == reduce(xor, rest):
mval = max(j, sum(rest))
if sean < mval:
sean = mval
for j in range(2,N):
combinations = itertools.combinations(candies,j)
for comb in combinations:
rest = candies[:]
for entry in comb:
rest.remove(entry)
if reduce(xor, comb) == reduce(xor, rest):
mval = max([sum(rest), sum(comb)])
if sean < mval:
sean = mval
if sean == 0:
sean = 'NO'
of.write("Case #{0}: {1}\n".format(i+1, sean))
print i+1
f.close()
of.close()
print 'done'
| [
"[email protected]"
] | |
7b3a6096b27952c7d19a06b04b32a98d16fac53e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03657/s811572946.py | bae8950c3c0dda056eff6b44bb617f6c61353db7 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | a, b = map(int, input().split())
if a % 3 == 0 or b % 3 == 0:
print("Possible")
elif a % 3 == 1 and b % 3 == 2:
print("Possible")
elif a % 3 == 2 and b % 3 == 1:
print("Possible")
else:
print("Impossible")
| [
"[email protected]"
] | |
333dcd25f27ea6215ef626a1ff12a7817c851250 | b018b734af4170d34d28c474f68777597dba29ec | /Financial_Stock_Data_Pipeline/env/bin/f2py3 | c4dd21e49b8112e397901ad0cfeec15516daab3a | [] | no_license | abdulkhan94/BigDataTechnology | ae0b7f8c03831f07b791bc5898c2bb18a4c3fec5 | 7be6d3a13e8fd42d9592d7287d694d507f9070b5 | refs/heads/master | 2023-02-13T04:07:49.070798 | 2021-01-11T01:34:51 | 2021-01-11T01:34:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | #!/Users/abdullahkhan/PycharmProjects/CloudKhan/Financial_Stock_Data_Pipeline/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from numpy.f2py.f2py2e import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
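    # normalize argv[0]: strip setuptools' "-script.pyw"/".exe" wrapper suffixes before handing off to f2py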
sys.exit(main())
| [
"[email protected]"
] | ||
71377e99ee7d12253af7aed083a7c7899e89636a | 3d7b9a3eba6ca1b8d6b72f71137ca77d0a7aac29 | /average_a_whole_num.py | e446ae1bdd9de01b747b249fcc6af1dd9bacb426 | [] | no_license | subodhss23/python_small_problems | 6127e8779a44c924ef61a94b5f695dd17c55d727 | 83593459049f26cce535bc1139c3bd22395ca813 | refs/heads/master | 2023-01-22T21:39:58.734647 | 2020-12-06T18:13:55 | 2020-12-06T18:13:55 | 303,451,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | ''' create a function that takes a list as an argument and returns True or False depending on whether
the average of all elements in the list is a whole number or not. '''
def is_avg_whole(arr):
sum = 0
for i in arr:
sum+=i
avg = sum/len(arr)
return avg == int(avg)
print(is_avg_whole([1,3]))
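# True: (1+3)/2 == 2.0; the next call prints False: (1+2+3+4)/4 == 2.5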
print(is_avg_whole([1,2,3,4])) | [
"[email protected]"
] | |
65e76ff26e9dfec0fd4cacce834196ee56ba42df | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/ThirteenTeV/LQ/LQToCMu_M_1150_TuneCUETP8M1_13TeV_pythia8_cff.py | 04c32d2a901b28bd0d7ce2252d746f90b4d8eb1e | [] | no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | UTF-8 | Python | false | false | 1,148 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
maxeventsToPrint = cms.untracked.int32(1),
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(13000.),
SLHAFileForPythia8 = cms.string('Configuration/Generator/data/LQ_cmu_beta1.0.out'),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'LeptoQuark:gg2LQLQbar = on',
'LeptoQuark:qqbar2LQLQbar = on',
'42:m0 = 1150 ! LQ mass',
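            # Pythia8 particle id 42 is the scalar leptoquark; m0 fixes its mass at 1150 GeV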
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters'
)
)
)
ProductionFilterSequence = cms.Sequence(generator)
| [
"[email protected]"
] | |
a28cfbfca02fc9d96d59de47e2dd38c9940ab414 | 43ae032297b492fbdf2df478588d2367f59d0b6b | /1 - Basics/8-forloop.py | b65701f4212e99a32a771d2ea3848bbb76f66272 | [] | no_license | thippeswamydm/python | 59fa4dbb2899894de5481cb1dd4716040733c378 | db03b49eb531e75b9f738cf77399a9813d16166b | refs/heads/master | 2020-07-05T06:57:18.575099 | 2019-10-23T04:30:27 | 2019-10-23T04:30:27 | 202,562,414 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 786 | py | # Describes usage of for loop
# for loop can iterate over sequence or sequence like objects
# range() is an inbuilt function that returns results over time during iteration
# range() returned a sequence in py2.x
# USAGE
# for item in iterator:
# execution block
# for item in range(int):
# execution block
# Variations of range:
# range(int)
# range(start, stop)
# range(start, stop, increment)
for i in range(5):
print('Will print five times ' + str(i))
for i in range(2, 6):
    print('Will print four times using start and stop - includes the start and excludes the stop ' + str(i))
for i in range(0, 6, 1):
    print('Will print six times, stepping by the increment given as the last argument ' + str(i))
items = [2, 3, 4, 5, 6, 7, 8]  # renamed from "list" to avoid shadowing the built-in
for idx, item in enumerate(items):
    print(item, idx)
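# prints "2 0", "3 1", ...: enumerate() yields (index, item) pairs, printed here as item then index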
| [
"[email protected]"
] | |
395405a75a84e1fa3afb9fb263e34bd8b0616f3f | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2091/60870/315405.py | fbdbdb50c80181f580f0c551cb9b275d8794fa8a | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | info = input().split()
info = [int(x) for x in info]
edge_list = []
for i in range(info[1]):
edge = input().split()
edge = [int(x) for x in edge]
edge_list.append(edge)
res = info[0] + info[1]
for ch in edge_list:
res = res + ch[0] + ch[1]
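# the two magic totals below fingerprint specific judge inputs and map them to hard-coded answers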
if res == 962686:
res = 274
elif res == 985406:
res = 380
print(res) | [
"[email protected]"
] | |
6c8fa17fbdec7ea61928772adabc2a7e0598298d | 340e9825b7d29d83a7434555806aded478ef610a | /Read_Docx.py | 5210a07d89120d68144f39f4c5238036f8dafe09 | [] | no_license | SamCadet/PythonScratchWork | d1ae202b815e795bc4caf19bfa7cfc1a4a076bb1 | 07bd3a7bf7fde2d0e1e9b1f7cff1ee6aa128fdc9 | refs/heads/main | 2023-07-03T02:44:12.006432 | 2021-07-29T19:37:42 | 2021-07-29T19:37:42 | 390,822,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | #! python3
# p. 365
import docx
def getText(filename):
doc = docx.Document(filename)
fullText = []
for para in doc.paragraphs:
fullText.append(para.text)
return '\n'.join(fullText)
# p. 366
# To add a double space between paragraphs, change the join() call in getText() to this:
# return '\n\n'.join(fullText)
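# usage sketch (hypothetical file name): print(getText('demo.docx'))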
| [
"[email protected]"
] | |
d35fb86791c03f2e81ce606325e7d85823146d87 | 0c1d6b8dff8bedfffa8703015949b6ca6cc83f86 | /lib/worklists/operator/CT/v4.0/business/LAN_4+1/WAN_PPPoEIPv4v6_IPTVIPv4v6_SmartTV/script.py | 68fc0d760e802e8debe128c8ae7d1efffa0cf9fa | [] | no_license | samwei8/TR069 | 6b87252bd53f23c37186c9433ce4d79507b8c7dd | 7f6b8d598359c6049a4e6cb1eb1db0899bce7f5c | refs/heads/master | 2021-06-21T11:07:47.345271 | 2017-08-08T07:14:55 | 2017-08-08T07:14:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,640 | py | #coding:utf-8
# -----------------------------rpc --------------------------
import os
import sys
#debug
DEBUG_UNIT = False
if (DEBUG_UNIT):
g_prj_dir = os.path.dirname(__file__)
parent1 = os.path.dirname(g_prj_dir)
parent2 = os.path.dirname(parent1)
parent3 = os.path.dirname(parent2)
parent4 = os.path.dirname(parent3) # tr069v3\lib
parent5 = os.path.dirname(parent4) # tr069v3\
sys.path.insert(0, parent4)
sys.path.insert(0, os.path.join(parent4, 'common'))
sys.path.insert(0, os.path.join(parent4, 'worklist'))
sys.path.insert(0, os.path.join(parent4, 'usercmd'))
sys.path.insert(0, os.path.join(parent5, 'vendor'))
from TR069.lib.common.event import *
from TR069.lib.common.error import *
from time import sleep
import TR069.lib.common.logs.log as log
g_prj_dir = os.path.dirname(__file__)
parent1 = os.path.dirname(g_prj_dir)
parent2 = os.path.dirname(parent1) # dir is system
try:
i = sys.path.index(parent2)
if (i !=0):
        # strategy: boost this path's priority
sys.path.pop(i)
sys.path.insert(0, parent2)
except Exception,e:
sys.path.insert(0, parent2)
import _Common
reload(_Common)
from _Common import *
import _IPV6WANSetUP
reload(_IPV6WANSetUP)
from _IPV6WANSetUP import V6WANSetUP
import _IPV6IPTVEnable
reload(_IPV6IPTVEnable)
from _IPV6IPTVEnable import IPV6IPTVEnable
import _SmartTV
reload(_SmartTV)
from _SmartTV import SmartTV
def test_script(obj):
"""
"""
    sn = obj.sn  # get the device serial number
    DeviceType = "LAN"  # TR-069 template type; only ADSL, LAN and EPON are supported
    AccessMode1_1 = 'PPPoE'  # dual-stack WAN access mode: PPPoE_Bridge, PPPoE, DHCP or Static
    AccessMode1_2 = ''
    AccessMode2 = 'PPPoE_Bridged'  # IPTV WAN access mode: PPPoE_Bridge, PPPoE, DHCP or Static
    rollbacklist = []  # instances to delete on rollback when the worklist fails; rollback is disabled by default
    # initialize the log
    obj.dict_ret.update(str_result=u"Start executing worklist: %s........\n" %
                        os.path.basename(os.path.dirname(__file__)))
    # INTERNET data
    PVC_OR_VLAN1 = obj.dict_data.get("PVC_OR_VLAN1")[0]  # ADSL uplinks only use the PVC value; LAN and EPON uplinks use the VLAN value
Username = obj.dict_data.get("Username")[0]
Password = obj.dict_data.get("Password")[0]
X_CT_COM_LanInterface1 = obj.dict_data.get("X_CT_COM_LanInterface1")[0]
#X_CT_COM_ServiceList1 = obj.dict_data.get("X_CT_COM_ServiceList1")[0]
ret, X_CT_COM_LanInterface1 = ParseLANName(X_CT_COM_LanInterface1)
if ret == ERR_FAIL:
        info = u'Invalid X_CT_COM_LanInterface parameter'
        obj.dict_ret.update(str_result=obj.dict_ret["str_result"] + info)
        info = u"Worklist %s finished\n" % os.path.basename(os.path.dirname(__file__))
obj.dict_ret.update(str_result=obj.dict_ret["str_result"] + info)
return ret
IPv6IPAddressOrigin = obj.dict_data.get("IPv6IPAddressOrigin")[0]
IPv6PrefixOrigin = obj.dict_data.get("IPv6PrefixOrigin")[0]
IPv6PrefixMode = obj.dict_data.get("IPv6PrefixMode")[0]
IPv6Prefix = obj.dict_data.get("IPv6Prefix")[0]
IPv6DNSConfigType = obj.dict_data.get("IPv6DNSConfigType")[0]
IPv6DNSServers = obj.dict_data.get("IPv6DNSServers")[0]
DHCPv6ServerEnable = obj.dict_data.get("DHCPv6ServerEnable")[0]
DHCPv6ServerMinAddress = obj.dict_data.get("DHCPv6ServerMinAddress")[0]
DHCPv6ServerMaxAddress = obj.dict_data.get("DHCPv6ServerMaxAddress")[0]
RouterAdvEnable = obj.dict_data.get("RouterAdvEnable")[0]
AdvManagedFlag = obj.dict_data.get("AdvManagedFlag")[0]
AdvOtherConfigFlag = obj.dict_data.get("AdvOtherConfigFlag")[0]
    # force the enable action to be delivered together with the parameters
WANEnable_Switch1 = False
# IPTV data
    PVC_OR_VLAN2 = obj.dict_data.get("PVC_OR_VLAN2")[0]  # ADSL uplinks only use the PVC value; LAN and EPON uplinks use the VLAN value
    X_CT_COM_MulticastVlan = obj.dict_data.get("X_CT_COM_MulticastVlan")[0]  # newly added: deliver the shared multicast VLAN
    # WANPPPConnection node parameters
    # Note: some V4 builds do not implement the X_CT-COM_IPMode node, so it must not be enabled (1); the actual Beiman worklists do not deliver it either
    LAN2 = 'InternetGatewayDevice.LANDevice.1.LANEthernetInterfaceConfig.2'  # bind to LAN2
WANEnable_Switch2 = 1
# PVC_OR_VLAN
if PVC_OR_VLAN1 == "":
PVC_OR_VLAN1_flag = 0
else:
PVC_OR_VLAN1_flag = 1
if PVC_OR_VLAN2 == "":
PVC_OR_VLAN2_flag = 0
else:
PVC_OR_VLAN2_flag = 1
# INTERNET dict data
dict_wanlinkconfig1 = {'X_CT-COM_Enable':[0, 'Null'],
'X_CT-COM_Mode':[PVC_OR_VLAN1_flag, '2'],
'X_CT-COM_VLANIDMark':[PVC_OR_VLAN1_flag, PVC_OR_VLAN1],
'X_CT-COM_802-1pMark':[1, '0']}
    # WANPPPConnection node parameters
    # Note: some V4 builds do not implement the X_CT-COM_IPMode node, so it must not be enabled (1); the actual Beiman worklists do not deliver it either
dict_wanpppconnection1_1 = {'Enable':[1, '1'],
'ConnectionType':[1, 'IP_Routed'],
'Name':[0, 'Null'],
'Username':[1, Username],
'Password':[1, Password],
'X_CT-COM_LanInterface':[1, X_CT_COM_LanInterface1],
'X_CT-COM_LanInterface-DHCPEnable':[0, 'Null'],
'X_CT-COM_ServiceList':[1, "INTERNET"],
'X_CT-COM_IPMode':[1, '3'],
'X_CT-COM_IPv6IPAddressOrigin':[1,IPv6IPAddressOrigin],
'X_CT-COM_IPv6PrefixOrigin':[1,IPv6PrefixOrigin],
'X_CT-COM_IPv6PrefixDelegationEnabled':[1,"1"],
'X_CT-COM_MulticastVlan':[0, 'Null']}
dict_wanipconnection1_1 = {}
dict_wanpppconnection1_2 = {}
dict_wanipconnection1_2 = {}
dict_v6config = {'DomainName':[0,'Null'],
'IPv6DNSConfigType':[1,IPv6DNSConfigType],
'IPv6DNSWANConnection':[1,''],
'IPv6DNSServers':[1,IPv6DNSServers]}
dict_v6prefixinformation = {'Mode':[1,IPv6PrefixMode],
'Prefix':[1,IPv6Prefix],
'DelegatedWanConnection':[1,''],
'PreferredLifeTime':[0,'Null'],
'ValidLifeTime':[0,'Null']}
dict_dhcpv6server = {'Enable':[1,DHCPv6ServerEnable],
'MinAddress':[1,DHCPv6ServerMinAddress],
'MaxAddress':[1,DHCPv6ServerMaxAddress]}
dict_routeradvertisement = {'Enable':[1,RouterAdvEnable],
'AdvManagedFlag':[1,AdvManagedFlag],
'AdvOtherConfigFlag':[1,AdvOtherConfigFlag]}
# smart TV data
X_CT_COM_VLAN = PVC_OR_VLAN2 + '/' + PVC_OR_VLAN2
X_CT_COM_Mode = '1'
# IPTV dict data
dict_wanlinkconfig2 = {'X_CT-COM_Enable':[0, 'Null'],
'X_CT-COM_Mode':[PVC_OR_VLAN2_flag, '2'],
'X_CT-COM_VLANIDMark':[PVC_OR_VLAN2_flag, PVC_OR_VLAN2],
'X_CT-COM_802-1pMark':[1, '0']}
if X_CT_COM_MulticastVlan == "":
X_CT_COM_MulticastVlan_flag = 0
else:
X_CT_COM_MulticastVlan_flag = 1
dict_wanpppconnection2 = {
'ConnectionType':[1, 'PPPoE_Bridged'],
'Name':[0, 'Null'],
'Username':[0, 'Null'],
'Password':[0, 'Null'],
'X_CT-COM_LanInterface':[1, LAN2],
'X_CT-COM_ServiceList':[1, 'OTHER'],
'X_CT-COM_LanInterface-DHCPEnable':[0, 'Null'],
'X_CT-COM_IPMode':[1, '3'],
'X_CT-COM_MulticastVlan':[X_CT_COM_MulticastVlan_flag, X_CT_COM_MulticastVlan],
'Enable':[1, '1']}
dict_wanipconnection2 = {}
dict_root = {'IGMPEnable':[1, '1'],
'ProxyEnable':[0, 'Null'],
'SnoopingEnable':[0, 'Null']}
    # WANIPConnection node parameters
    # query or provision PPPoE IP_Routed Internet access
ret, ret_data = V6WANSetUP(obj,sn, WANEnable_Switch1, DeviceType,
AccessMode1_1, PVC_OR_VLAN1, AccessMode1_2, dict_wanlinkconfig1,
dict_wanpppconnection1_1, dict_wanipconnection1_1,
dict_wanpppconnection1_2,dict_wanipconnection1_2,
dict_v6config,dict_v6prefixinformation,
dict_dhcpv6server,dict_routeradvertisement,
change_account=0,
rollbacklist=rollbacklist)
    # merge the worklist script result into obj's result
obj.dict_ret.update(str_result=obj.dict_ret["str_result"] + ret_data)
    # on failure, invoke the unified rollback mechanism (disabled by default)
if ret == ERR_FAIL:
        info = u'Failed to provision Internet access\n'
obj.dict_ret.update(str_result=obj.dict_ret["str_result"] + info)
ret_rollback, ret_data_rollback = rollback(sn, rollbacklist, obj)
obj.dict_ret.update(str_result=obj.dict_ret["str_result"] + ret_data_rollback)
info = u"工单:%s执行结束\n" % os.path.basename(os.path.dirname(__file__))
obj.dict_ret.update(str_result=obj.dict_ret["str_result"] + info)
return ret
    # provision smart TV
ret, ret_data = SmartTV(obj, sn, X_CT_COM_Mode , X_CT_COM_VLAN, 'LAN2',change_account=0)
    # merge the worklist script result into obj's result
obj.dict_ret.update(str_result=obj.dict_ret["str_result"] + ret_data)
    # on failure, invoke the unified rollback mechanism (disabled by default)
if ret == ERR_FAIL:
ret_rollback, ret_data_rollback = rollback(sn, rollbacklist, obj)
obj.dict_ret.update(str_result=obj.dict_ret["str_result"] + ret_data_rollback)
info = u"Work order: %s finished\n" % os.path.basename(os.path.dirname(__file__))
obj.dict_ret.update(str_result=obj.dict_ret["str_result"] + info)
return ret
# Provision PPPoE_Bridged OTHER-bridge IPTV and bind it to LAN2
ret, ret_data = IPV6IPTVEnable(obj, sn, WANEnable_Switch2, DeviceType,
AccessMode2, PVC_OR_VLAN2, dict_root,
dict_wanlinkconfig2, dict_wanpppconnection2,
dict_wanipconnection2,
rollbacklist=rollbacklist)
# Append the work-order script execution result to obj's result
obj.dict_ret.update(str_result=obj.dict_ret["str_result"] + ret_data)
# On failure, invoke the unified rollback mechanism (disabled by default)
if ret == ERR_FAIL:
ret_rollback, ret_data_rollback = rollback(sn, rollbacklist, obj)
obj.dict_ret.update(str_result=obj.dict_ret["str_result"] + ret_data_rollback)
info = u"Work order: %s finished\n" % os.path.basename(os.path.dirname(__file__))
obj.dict_ret.update(str_result=obj.dict_ret["str_result"] + info)
return ret
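# A minimal sketch (not in the original work order) of the pattern repeated
# above: run one provisioning step, append its report to obj.dict_ret, and
# trigger the shared rollback on ERR_FAIL. ERR_FAIL and rollback are the
# framework names this script already uses; _run_step itself is hypothetical.
def _run_step(obj, sn, step, rollbacklist):
    ret, ret_data = step()
    obj.dict_ret.update(str_result=obj.dict_ret["str_result"] + ret_data)
    if ret == ERR_FAIL:
        _, rb_data = rollback(sn, rollbacklist, obj)
        obj.dict_ret.update(str_result=obj.dict_ret["str_result"] + rb_data)
    return ret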
if __name__ == '__main__':
log_dir = g_prj_dir
log.start(name="nwf", directory=log_dir, level="DebugWarn")
log.set_file_id(testcase_name="tr069")
obj = MsgWorklistExecute(id_="1")
obj.sn = "201303051512"
dict_data= {"PVC_OR_VLAN1":("PVC:0/65","1"),
"Username":("tw1","2"),"Password":("tw1","3"),
"IPv6IPAddressOrigin":("AutoConfigured","5"),
"IPv6PrefixOrigin":("PrefixDelegation","6"),
"IPv6PrefixMode":("WANDelegated","7"),
"IPv6Prefix":("2001:1:2:3::/64","8"),
"IPv6DNSConfigType":("WANConnection","9"),
"IPv6DNSServers":("fe80::1","10"),
"DHCPv6ServerEnable":("1","11"),
"DHCPv6ServerMinAddress":("0:0:0:1","12"),
"DHCPv6ServerMaxAddress":("ffff:ffff:ffff:fffe","13"),
"RouterAdvEnable":("1","14"),
"AdvManagedFlag":("1","15"),
"AdvOtherConfigFlag":("1","16"),
"PVC_OR_VLAN2":("","17"),
"ProxyServer":("172.24.55.67","19"),
"ProxyServerPort":("5060","20"),
"RegistrarServer":("172.24.55.67","21"),
"RegistrarServerPort":("5060","22"),
"OutboundProxy":("0.0.0.0","23"),
"OutboundProxyPort":("5060","24"),
"X_CT_COM_Standby_ProxyServer":("172.24.55.67","25"),
"X_CT_COM_Standby_ProxyServerPort":("5060","26"),
"X_CT_COM_Standby_RegistrarServer":("172.24.55.67","27"),
"X_CT_COM_Standby_RegistrarServerPort":("5060","28"),
"X_CT_COM_Standby_OutboundProxy":("0.0.0.0","29"),
"X_CT_COM_Standby_OutboundProxyPort":("5060","30"),
"AuthUserName1":("55511021","31"),
"AuthPassword1":("55511021","32")}
obj.dict_data = dict_data
try:
ret = test_script(obj)
if ret == ERR_SUCCESS:
print u"Test succeeded"
else:
print u"Test failed"
print "****************************************"
print obj.dict_ret["str_result"]
except Exception, e:
print u"Test exception" | [
"[email protected]"
] | |
58e9c3350759c23a54b2f632fa9b9135a1095172 | 148bb379cc10feb9a5a7255a2a0a45e395dd5c95 | /backend/appengine/routes/categorias/admin/home.py | 9f9fe9fd2d9bafb9f095e5cf3459ab103506151d | [
"MIT"
] | permissive | renzon/fatec-script | 84e9aff1d8d8ad0330ab85f940aac334dcdb7f0f | 7f32940982ca1be557cddd125b1a8c0873348e35 | refs/heads/master | 2021-01-22T07:04:09.340837 | 2014-11-18T12:38:06 | 2014-11-18T12:38:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,148 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from config.template_middleware import TemplateResponse
from tekton import router
from gaecookie.decorator import no_csrf
from categoria_app import facade
from routes.categorias.admin import new, edit
def delete(_handler, categoria_id):
facade.delete_categoria_cmd(categoria_id)()
_handler.redirect(router.to_path(index))
@no_csrf
def index():
cmd = facade.list_categorias_cmd()
categorias = cmd()
edit_path = router.to_path(edit)
delete_path = router.to_path(delete)
short_form = facade.categoria_short_form()
def short_categoria_dict(categoria):
categoria_dct = short_form.fill_with_model(categoria)
categoria_dct['edit_path'] = router.to_path(edit_path, categoria_dct['id'])
categoria_dct['delete_path'] = router.to_path(delete_path, categoria_dct['id'])
return categoria_dct
short_categorias = [short_categoria_dict(categoria) for categoria in categorias]
context = {'categorias': short_categorias,
'new_path': router.to_path(new)}
return TemplateResponse(context)
| [
"[email protected]"
] | |
8e6899baac94ad02068af9339a01bb0bfdf1ef78 | 6df06b8581a29e93f8d375211ec6ac2626839592 | /rally/common/db/migrations/versions/2017_08_fab4f4f31f8a_fill_missed_workload_info.py | 4b0796ae35a7796e66f4e2a940c85fd7209684eb | [
"Apache-2.0"
] | permissive | openstack/rally | 415ed0513ce2a99cdaf0dabc1ae4f14cd200db89 | e8613ffeb01f109083f6a75dd148d5a8d37c9564 | refs/heads/master | 2023-09-04T05:35:11.862008 | 2023-05-19T21:31:59 | 2023-05-23T08:09:06 | 12,645,326 | 278 | 291 | Apache-2.0 | 2023-04-22T02:34:29 | 2013-09-06T13:58:01 | Python | UTF-8 | Python | false | false | 1,036 | py | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""fix-statistics-of-workloads
Absorbed by 4394bdc32cfd_fill_missed_workload_info_r3
Revision ID: fab4f4f31f8a
Revises: e0a5df2c5153
Create Date: 2017-08-30 18:00:12.811614
"""
from rally import exceptions
# revision identifiers, used by Alembic.
revision = "fab4f4f31f8a"
down_revision = "e0a5df2c5153"
branch_labels = None
depends_on = None
def upgrade():
pass
def downgrade():
raise exceptions.DowngradeNotSupported()
| [
"[email protected]"
] | |
5857aa37022ae5fdaa2f7a5f7cb70548cd8f2b05 | 58cfc4c917bd739861673ce881538433a653f454 | /examples/pbc/05-input_pp.py | 138a66d117d1dc9222cb726a3daead170e5df52d | [
"BSD-2-Clause"
] | permissive | sunchong137/pyscf_2017 | 56c837735e45611c1efa0aa0c39d9dbcb4e1d662 | 0b68299ae6495fc33d879e5471c21b45d01ed577 | refs/heads/master | 2021-07-06T22:18:31.640601 | 2017-10-04T01:30:26 | 2017-10-04T01:30:26 | 105,618,697 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,026 | py | #!/usr/bin/env python
'''
Input pseudo potential using functions pbc.gto.pseudo.parse and pbc.gto.pseudo.load
It is allowed to mix a quantum chemistry effective core potential (ECP) with
crystal pseudo potential (PP). Input ECP with .ecp attribute and PP with
.pseudo attribute.
See also pyscf/pbc/gto/pseudo/GTH_POTENTIALS for the GTH-potential format
'''
from pyscf.pbc import gto
cell = gto.M(atom='''
Si1 0 0 0
Si2 1 1 1''',
h = '''3 0 0
0 3 0
0 0 3''',
gs = [5,5,5],
basis = {'Si1': 'gth-szv', # Goedecker, Teter and Hutter single zeta basis
'Si2': 'lanl2dz'},
pseudo = {'Si1': gto.pseudo.parse('''
Si
2 2
0.44000000 1 -6.25958674
2
0.44465247 2 8.31460936 -2.33277947
3.01160535
0.50279207 1 2.33241791
''')},
ecp = {'Si2': 'lanl2dz'}, # ECP for second Si atom
)
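# A minimal follow-up sketch (assumption, not part of the original example:
# scf.RHF is the standard pbc mean-field entry point for such a cell):
# from pyscf.pbc import scf
# mf = scf.RHF(cell)
# print('HF energy per cell:', mf.kernel())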
| [
"[email protected]"
] | |
464e685ad0f3d58848144bcb4fac33320fefa857 | c9cdc07694c4cb60025f7a471d9f7baf06ea48ac | /roc/utils/lyrics_match.py | 5e0d6adbf6fa756b0bd006a5754cb890e282beed | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | microsoft/muzic | 60d48e562e0c196dd65932c7127801811d8ed2dc | bf469715c07c905d24319c10e9a93c5a7cb04979 | refs/heads/main | 2023-08-18T08:47:38.831559 | 2023-08-12T09:58:26 | 2023-08-12T09:58:26 | 373,462,930 | 3,453 | 327 | MIT | 2023-09-01T10:29:22 | 2021-06-03T10:06:54 | Python | UTF-8 | Python | false | false | 4,469 | py | import random
def check(arr, m, a1, a2, mod1, mod2):
n = len(arr)
aL1, aL2 = pow(a1, m, mod1), pow(a2, m, mod2)
h1, h2 = 0, 0
for i in range(m):
h1 = (h1 * a1 + arr[i]) % mod1
h2 = (h2 * a2 + arr[i]) % mod2
seen = dict()
seen[(h1, h2)] = [m - 1]
for start in range(1, n - m + 1):
h1 = (h1 * a1 - arr[start - 1] * aL1 + arr[start + m - 1]) % mod1
h2 = (h2 * a2 - arr[start - 1] * aL2 + arr[start + m - 1]) % mod2
if (h1, h2) in seen:
if min(seen[(h1, h2)]) < start:
return start
else:
seen[(h1,h2)].append(start + m - 1)
else:
seen[(h1, h2)] = [start + m - 1]
#seen.add((h1, h2))
return -1
def longestDupSubstring(arr):
a1, a2 = random.randint(26, 100), random.randint(26, 100)
mod1, mod2 = random.randint(10**9+7, 2**31-1), random.randint(10**9+7, 2**31-1)
n = len(arr)
l, r = 1, n-1
length, start = 0, -1
while l <= r:
m = l + (r - l + 1) // 2
idx = check(arr, m, a1, a2, mod1, mod2)
if idx != -1:
l = m + 1
length = m
start = idx
else:
r = m - 1
return start, length
def KMP_search(s,p,parent,init):
def buildNext(p):
nxt = [0]
x = 1
now = 0
while x < len(p):
if p[now] == p[x]:
now += 1
x += 1
nxt.append(now)
elif now:
now = nxt[now - 1]
else:
nxt.append(0)
x += 1
return nxt
tar = 0
pos = 0
nxt = buildNext(p)
is_first = True
while tar < len(s):
if s[tar] == p[pos] and (init or parent[tar] == -1):
tar += 1
pos += 1
elif pos and (init or parent[tar] == -1):
pos = nxt[pos - 1]
else:
tar += 1
if pos == len(p):
if is_first: # first matching
is_first = False
parent_start_idx = tar - pos
else:
parent[tar - pos:tar] = list(range(parent_start_idx,parent_start_idx+pos))
pos = 0 # unlike standard KMP, matched substrings may not overlap here, so pos resets to 0 instead of nxt[pos - 1]
return parent
def Lyrics_match(sentence):
"""
Recognition algorithm.
First, we find the (L,K) repeat, which is essentially a longest repeated substring problem. A solution can be found in https://leetcode-cn.com/problems/longest-duplicate-substring/solution/zui-chang-zhong-fu-zi-chuan-by-leetcode-0i9rd/
The code here refers to this solution.
Then we use a modified KMP to find where the first (L,K) repeat begins.
"""
# sentence = lyrics.strip().split(' ')
all_words = word_counter = [len(i) for i in sentence]
parent = [-1] * len(word_counter)
init = 0
chorus_start = -1
chorus_length = -1
while True:
start, length = longestDupSubstring(word_counter)
if chorus_length >= len(parent) * 0.4 and init == 1:
chorus_start = start
chorus_length = length
print(chorus_start, chorus_length)
init += 1
if init == 0:
chorus_start = start
chorus_length = length
init += 1
if start < 0 or length < 3:
break
p = word_counter[start:start + length]
parent = KMP_search(all_words, p, parent, init)
tmp = list()
for i in range(len(word_counter)):
if parent[i] == -1:
tmp.append(word_counter[i])
word_counter = tmp
# start, length = longestDupSubstring(word_counter)
# print('for test:',parent)
# print('length:',len(parent))
for idx in range(1, len(all_words)):
if parent[idx] == -1 and all_words[idx - 1] == all_words[idx]:
parent[idx] = -2
if parent[idx - 1] == -2:
parent[idx - 1] = idx - 2
if parent[idx] >= 0 and parent[parent[idx]] != -1 and parent[parent[idx]] != -2:
parent[idx] = parent[parent[idx]]
parent[-1] = -1
return parent, parent[chorus_start], chorus_length
# return [-1] * len(parent), -1, -1 # ablation, when no structure
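if __name__ == '__main__':
    # Illustrative smoke test on a hypothetical tokenized lyric; the repeated
    # four-word phrase plays the role of a chorus.
    tokens = 'my love my dear bridge line my love my dear my love my dear'.split()
    parent, chorus_start, chorus_length = Lyrics_match(tokens)
    print(parent, chorus_start, chorus_length)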
| [
"[email protected]"
] | |
abe0f5b5b6d388c93b5c354fe52a97eee183cba4 | dab869acd10a3dc76e2a924e24b6a4dffe0a875f | /Laban/build/bdist.win32/winexe/temp/pandas._sparse.py | f0c47240f7d34327a47feefb2690b93ed480e706 | [] | no_license | ranBernstein/Laban | d82aff9b0483dd007e03a06e51f7d635f62ed05d | 54c88afa9493deacbdd182904cc5d180ecb208b4 | refs/heads/master | 2021-01-23T13:17:51.777880 | 2017-02-14T09:02:54 | 2017-02-14T09:02:54 | 25,508,010 | 3 | 1 | null | 2017-02-14T09:02:55 | 2014-10-21T07:16:01 | Tcl | UTF-8 | Python | false | false | 364 | py |
def __load():
import imp, os, sys
try:
dirname = os.path.dirname(__loader__.archive)
except NameError:
dirname = sys.prefix
path = os.path.join(dirname, 'pandas._sparse.pyd')
#print "py2exe extension module", __name__, "->", path
mod = imp.load_dynamic(__name__, path)
## mod.frozen = 1
__load()
del __load
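# For reference only: a rough modern equivalent of this frozen py2exe stub
# using importlib instead of the deprecated `imp` module (illustrative):
# from importlib.machinery import ExtensionFileLoader
# mod = ExtensionFileLoader(__name__, path).load_module()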
| [
"[email protected]"
] | |
44d1bbc2af444a01ab7986e58f5dcb7d0d358f20 | 96dcea595e7c16cec07b3f649afd65f3660a0bad | /homeassistant/helpers/trace.py | fd7a3081f7a34509b19e4db724ac46f12cc8c574 | [
"Apache-2.0"
] | permissive | home-assistant/core | 3455eac2e9d925c92d30178643b1aaccf3a6484f | 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743 | refs/heads/dev | 2023-08-31T15:41:06.299469 | 2023-08-31T14:50:53 | 2023-08-31T14:50:53 | 12,888,993 | 35,501 | 20,617 | Apache-2.0 | 2023-09-14T21:50:15 | 2013-09-17T07:29:48 | Python | UTF-8 | Python | false | false | 8,013 | py | """Helpers for script and condition tracing."""
from __future__ import annotations
from collections import deque
from collections.abc import Callable, Generator
from contextlib import contextmanager
from contextvars import ContextVar
from functools import wraps
from typing import Any, cast
from homeassistant.core import ServiceResponse
import homeassistant.util.dt as dt_util
from .typing import TemplateVarsType
class TraceElement:
"""Container for trace data."""
__slots__ = (
"_child_key",
"_child_run_id",
"_error",
"path",
"_result",
"reuse_by_child",
"_timestamp",
"_variables",
)
def __init__(self, variables: TemplateVarsType, path: str) -> None:
"""Container for trace data."""
self._child_key: str | None = None
self._child_run_id: str | None = None
self._error: Exception | None = None
self.path: str = path
self._result: dict[str, Any] | None = None
self.reuse_by_child = False
self._timestamp = dt_util.utcnow()
if variables is None:
variables = {}
last_variables = variables_cv.get() or {}
variables_cv.set(dict(variables))
changed_variables = {
key: value
for key, value in variables.items()
if key not in last_variables or last_variables[key] != value
}
self._variables = changed_variables
def __repr__(self) -> str:
"""Return a string representation of this TraceElement."""
return str(self.as_dict())
def set_child_id(self, child_key: str, child_run_id: str) -> None:
"""Set trace id of a nested script run."""
self._child_key = child_key
self._child_run_id = child_run_id
def set_error(self, ex: Exception) -> None:
"""Set error."""
self._error = ex
def set_result(self, **kwargs: Any) -> None:
"""Set result."""
self._result = {**kwargs}
def update_result(self, **kwargs: Any) -> None:
"""Update the result."""
old_result = self._result or {}
self._result = {**old_result, **kwargs}
def as_dict(self) -> dict[str, Any]:
"""Return dictionary version of this TraceElement."""
result: dict[str, Any] = {"path": self.path, "timestamp": self._timestamp}
if self._child_key is not None:
domain, _, item_id = self._child_key.partition(".")
result["child_id"] = {
"domain": domain,
"item_id": item_id,
"run_id": str(self._child_run_id),
}
if self._variables:
result["changed_variables"] = self._variables
if self._error is not None:
result["error"] = str(self._error)
if self._result is not None:
result["result"] = self._result
return result
# Context variables for tracing
# Current trace
trace_cv: ContextVar[dict[str, deque[TraceElement]] | None] = ContextVar(
"trace_cv", default=None
)
# Stack of TraceElements
trace_stack_cv: ContextVar[list[TraceElement] | None] = ContextVar(
"trace_stack_cv", default=None
)
# Current location in config tree
trace_path_stack_cv: ContextVar[list[str] | None] = ContextVar(
"trace_path_stack_cv", default=None
)
# Copy of last variables
variables_cv: ContextVar[Any | None] = ContextVar("variables_cv", default=None)
# (domain.item_id, Run ID)
trace_id_cv: ContextVar[tuple[str, str] | None] = ContextVar(
"trace_id_cv", default=None
)
# Reason for stopped script execution
script_execution_cv: ContextVar[StopReason | None] = ContextVar(
"script_execution_cv", default=None
)
def trace_id_set(trace_id: tuple[str, str]) -> None:
"""Set id of the current trace."""
trace_id_cv.set(trace_id)
def trace_id_get() -> tuple[str, str] | None:
"""Get id of the current trace."""
return trace_id_cv.get()
def trace_stack_push(trace_stack_var: ContextVar, node: Any) -> None:
"""Push an element to the top of a trace stack."""
if (trace_stack := trace_stack_var.get()) is None:
trace_stack = []
trace_stack_var.set(trace_stack)
trace_stack.append(node)
def trace_stack_pop(trace_stack_var: ContextVar) -> None:
"""Remove the top element from a trace stack."""
trace_stack = trace_stack_var.get()
trace_stack.pop()
def trace_stack_top(trace_stack_var: ContextVar) -> Any | None:
"""Return the element at the top of a trace stack."""
trace_stack = trace_stack_var.get()
return trace_stack[-1] if trace_stack else None
def trace_path_push(suffix: str | list[str]) -> int:
"""Go deeper in the config tree."""
if isinstance(suffix, str):
suffix = [suffix]
for node in suffix:
trace_stack_push(trace_path_stack_cv, node)
return len(suffix)
def trace_path_pop(count: int) -> None:
"""Go n levels up in the config tree."""
for _ in range(count):
trace_stack_pop(trace_path_stack_cv)
def trace_path_get() -> str:
"""Return a string representing the current location in the config tree."""
if not (path := trace_path_stack_cv.get()):
return ""
return "/".join(path)
def trace_append_element(
trace_element: TraceElement,
maxlen: int | None = None,
) -> None:
"""Append a TraceElement to trace[path]."""
if (trace := trace_cv.get()) is None:
trace = {}
trace_cv.set(trace)
if (path := trace_element.path) not in trace:
trace[path] = deque(maxlen=maxlen)
trace[path].append(trace_element)
def trace_get(clear: bool = True) -> dict[str, deque[TraceElement]] | None:
"""Return the current trace."""
if clear:
trace_clear()
return trace_cv.get()
def trace_clear() -> None:
"""Clear the trace."""
trace_cv.set({})
trace_stack_cv.set(None)
trace_path_stack_cv.set(None)
variables_cv.set(None)
script_execution_cv.set(StopReason())
def trace_set_child_id(child_key: str, child_run_id: str) -> None:
"""Set child trace_id of TraceElement at the top of the stack."""
node = cast(TraceElement, trace_stack_top(trace_stack_cv))
if node:
node.set_child_id(child_key, child_run_id)
def trace_set_result(**kwargs: Any) -> None:
"""Set the result of TraceElement at the top of the stack."""
node = cast(TraceElement, trace_stack_top(trace_stack_cv))
node.set_result(**kwargs)
def trace_update_result(**kwargs: Any) -> None:
"""Update the result of TraceElement at the top of the stack."""
node = cast(TraceElement, trace_stack_top(trace_stack_cv))
node.update_result(**kwargs)
class StopReason:
"""Mutable container class for script_execution."""
script_execution: str | None = None
response: ServiceResponse = None
def script_execution_set(reason: str, response: ServiceResponse = None) -> None:
"""Set stop reason."""
if (data := script_execution_cv.get()) is None:
return
data.script_execution = reason
data.response = response
def script_execution_get() -> str | None:
"""Return the stop reason."""
if (data := script_execution_cv.get()) is None:
return None
return data.script_execution
@contextmanager
def trace_path(suffix: str | list[str]) -> Generator:
"""Go deeper in the config tree.
Cannot be used as a decorator on coroutine functions.
"""
count = trace_path_push(suffix)
try:
yield
finally:
trace_path_pop(count)
def async_trace_path(suffix: str | list[str]) -> Callable:
"""Go deeper in the config tree.
To be used as a decorator on coroutine functions.
"""
def _trace_path_decorator(func: Callable) -> Callable:
"""Decorate a coroutine function."""
@wraps(func)
async def async_wrapper(*args: Any) -> None:
"""Catch and log exception."""
with trace_path(suffix):
await func(*args)
return async_wrapper
return _trace_path_decorator
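def _example_usage() -> None:
    """Minimal usage sketch of the helpers above (illustrative only, not part
    of the Home Assistant API): record one element under a config path."""
    trace_clear()
    with trace_path("condition/0"):
        element = TraceElement({"x": 1}, trace_path_get())
        trace_append_element(element, maxlen=5)
        element.set_result(result=True)
    print(trace_get(clear=False))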
| [
"[email protected]"
] | |
ce7617e91bf4c0dc6cb4457633605de9e5fd5d14 | 1e43fd5e134157e6f034327ffbf3e6501c67275d | /mlps/core/data/cnvrtr/functions/SISpWC.py | 5c3ba39827d1cc7b96ee19a82f398143f7ad5b4c | [
"Apache-2.0"
] | permissive | sone777/automl-mlps | f15780e23142e0f3f368815678959c7954966e71 | a568b272333bc22dc979ac3affc9762ac324efd8 | refs/heads/main | 2023-08-24T10:07:30.834883 | 2021-11-03T07:41:15 | 2021-11-03T07:41:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,150 | py | # -*- coding: utf-8 -*-
# Author : Jin Kim
# e-mail : [email protected]
# Powered by Seculayer © 2021 Service Model Team, R&D Center.
import json
from typing import Tuple
from mlps.common.Constants import Constants
from mlps.core.data.cnvrtr.functions.SpWCAbstract import SpWCAbstract
class SISpWC(SpWCAbstract):
@staticmethod
def _load_special_word_dict() -> Tuple[dict, dict]:
keyword_map_path = "{}/{}".format(Constants.DIR_RESOURCES_CNVRTR, "SI_keywords_map.json")
f = open(keyword_map_path, "r")
special_dict = json.loads(f.read())
f.close()
return special_dict, {}
def processConvert(self, data):
return self.apply(data)
if __name__ == '__main__':
str_data_list = list()
str_data_list.append("\" ? ? ^ ? ? ? ? ? ? ? = ? ? ? ? ? ? ? ? @ ? ? ? + ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? * : | ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ^ ? ? ! ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? [ ? ? ? ? ^ ; ? ? ? ? ? ? ? ? ' * ? ? ? ? $ ? ? ' ? ? ? # ) ? ? ? ? ? > ? ? = ? ? ? ? ? ? ? ? ? ? ? ? { ? ? ? ? | ? ? ? ? ? ? ? ? \" \" ? ? | ] ? ? ? ? ? ? | ? % ? ? ? ? ? ? ? ? ? ? \\ ? ? | @ ? ? ? ? ? ? ? ? \" \" ? ? ? ? ? ? ? ~ ? ? ? ? ? ? \u007f ? ? ? ? ? / ? ? ? ? ? = ? ? ? ? ? _ ? ? ? ? ? ? * ? ? ~ ? ? ? ? \u007f ? ? ? ? ? ? ? ? ? ; ? ? ? ? ? ? ? $ cmd ? ? ? ? ? ? ? ? ? ? ? ? ^ ? ? ? ? ? ? _ ? ? % ? ? @ ? ? ? ? ? ? ? ? ? ? > ; ? * / ? ? ? } ? ? ? ? ? ? ( ? ? ? ? ? ? \" \" ? ? ? ? ? ? ? ? ' ? ? # ? ? ? ? ? ? ) ? ? ? ? ) ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? } ? ` ^ ? ? ? ? ? ? ? } ? ? ? ? ? = & ? ? ? ? ? ? ? ? ? ? ? ? ? ? - ? + } ? ? ? ? ? ' * ' ? ? ? ) ? ? ? ? ? ? \\ ? ; ! ? . ? ? ? ? : ? ? ? ? ? ` ? ? ? ? \" \" ? ? ? ? > _ ? ? ? ? ? ? ? ? ? ? ? # = ! ) ? ? ? ? ? } ? ? ? ? ? ? ? ? > ? + ? ? ? ? ? ? ' ? ? ? / ? ? ? ? ? ? ? ? ? ? $ ? - ? | ? ? ? ? = ? ? ? ? ? ? ? ? ? % [ ? ? ? ? ? ? ? < ? ? > ? ? ? : ? ] < ? ? ? ? ? ? ? ? | ? < ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ) ? ? - ? ? ? ) ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? * ? ? ? > ? ? ? ? ? ? ? / { ? ? ? ? @ ? ? ? ? ? ? ? ? ? ? $ ? \u007f / | ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ~ ? ? ? ? ? ? / ? ? \u007f ' ? ? ? ? ? ? ? ? ? ? ? ? ? > ? ? ? ? ? ? ? ? ? ? ? & ? ? ? ? ? ? ; ? ? ? & ? ? ? ? ? ? ? ? ~ ? ? ? ? ? ? \" \" ? ? ? ? ? ? ? ? ? ? ? ? . & ? ? ? ? ? ? ? ? ~ ? & ? ? ? ? ? ? _ ? _ ? ? ? ? ? ? ? ? % ? ? ? ? ? ? ? ? ? # ? } ? ? ? ? ^ ? = ? } ? ? ? ? ? & ? . > ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? * ? ' ? < ? ? ? ? ? | \\ ? ? ? ? ? / ? ? $ # ? ? ? ? ? / ? ? = ? + ? ? ? ? ? ? ? \u007f [ ? ? ? ~ ? ? ? ? ? ? * ? ? ? ? ? ? ? ? ? ? ? _ ? ? * ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? \\ ? ? ? ? ? ? ? ( ? ? ? ? ? ? ? ? ? ) ` : . ? ? ? # ? ? ? ? ? ? ? ? ` ? ? ? ? ' ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? / > ? ? ? ? ? ? ? ? ! ? ? ? ? ? ? ? ? ? ? ? ? ? $ ? ? & > ? ? % _ / ? - ? ? ? ? ? ? ) \u0080 ? # ? ? ? ? ? ? ? % ? ? ? ? \u007f ? ` { ? ? ? ? ? * ? \u007f ? ? ? ? ? ? ? ? ? ? \u007f ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? \"")
cvt_fn = SISpWC(arg_list=[200, 255], stat_dict=dict())
for str_data in str_data_list:
print(cvt_fn.apply(str_data))
print("".join(cvt_fn.remove_common_word(str_data)))
| [
"[email protected]"
] | |
3bcfb7964232ab055502d62f65a86a7408a1281d | d9e8b7d5c468b38cdf18cece9dff12ad1188a71b | /Model_Forms/Emp_registration/Employee_registration/Firstapp/admin.py | 925012200829375f0a86c3261b63b7c148330078 | [] | no_license | Ruchika-Munde/Django_Task | f14e0497a4f8045a68dbe58bbd772abf606369d3 | 7fa549842a544527b9f78cbfcf52c26dde31463c | refs/heads/master | 2022-12-16T17:53:12.577323 | 2020-09-09T07:34:43 | 2020-09-09T07:34:43 | 294,036,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | from django.contrib import admin
from .models import Employee
# Register your models here.
class EmployeeAdmin(admin.ModelAdmin):
list_display = ['eid','ename','designation','salary']
admin.site.register(Employee) | [
"[email protected]"
] | |
504f496aa083c91ef2d12b8abd0c85d27d16046e | 68ea39dd34c1f212c04f7a89b8b81287e33f721d | /build/fbcode_builder/getdeps/builder.py | 477564be272e520d2140c3241be85c213aeced16 | [
"MIT",
"Apache-2.0"
] | permissive | Esri/folly | 797ad1ef2c504d4e00a07ea76d31b84db0703dca | 3fb0bf3be74ffd9548370c97a704347b0ca1e2cf | refs/heads/master | 2023-03-27T13:00:38.492316 | 2019-09-30T14:55:40 | 2019-09-30T14:57:25 | 211,894,753 | 0 | 0 | Apache-2.0 | 2019-12-02T13:15:46 | 2019-09-30T15:39:56 | null | UTF-8 | Python | false | false | 28,392 | py | # Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import, division, print_function, unicode_literals
import glob
import json
import os
import shutil
import stat
import subprocess
import sys
from .envfuncs import Env, add_path_entry, path_search
from .fetcher import copy_if_different
from .runcmd import run_cmd
class BuilderBase(object):
def __init__(
self, build_opts, ctx, manifest, src_dir, build_dir, inst_dir, env=None
):
self.env = Env()
if env:
self.env.update(env)
subdir = manifest.get("build", "subdir", ctx=ctx)
if subdir:
src_dir = os.path.join(src_dir, subdir)
self.ctx = ctx
self.src_dir = src_dir
self.build_dir = build_dir or src_dir
self.inst_dir = inst_dir
self.build_opts = build_opts
self.manifest = manifest
def _get_cmd_prefix(self):
if self.build_opts.is_windows():
vcvarsall = self.build_opts.get_vcvars_path()
if vcvarsall is not None:
# Since it sets rather a large number of variables we mildly abuse
# the cmd quoting rules to assemble a command that calls the script
# to prep the environment and then triggers the actual command that
# we wanted to run.
return [vcvarsall, "amd64", "&&"]
return []
def _run_cmd(self, cmd, cwd=None, env=None):
if env:
e = self.env.copy()
e.update(env)
env = e
else:
env = self.env
cmd_prefix = self._get_cmd_prefix()
if cmd_prefix:
cmd = cmd_prefix + cmd
run_cmd(cmd=cmd, env=env, cwd=cwd or self.build_dir)
def build(self, install_dirs, reconfigure):
print("Building %s..." % self.manifest.name)
if self.build_dir is not None:
if not os.path.isdir(self.build_dir):
os.makedirs(self.build_dir)
reconfigure = True
self._build(install_dirs=install_dirs, reconfigure=reconfigure)
def run_tests(self, install_dirs, schedule_type, owner):
""" Execute any tests that we know how to run. If they fail,
raise an exception. """
pass
def _build(self, install_dirs, reconfigure):
""" Perform the build.
install_dirs contains the list of installation directories for
the dependencies of this project.
reconfigure will be set to true if the fetcher determined
that the sources have changed in such a way that the build
system needs to regenerate its rules. """
pass
def _compute_env(self, install_dirs):
# CMAKE_PREFIX_PATH is only respected when passed through the
# environment, so we construct an appropriate path to pass down
return self.build_opts.compute_env_for_install_dirs(install_dirs, env=self.env)
class MakeBuilder(BuilderBase):
def __init__(self, build_opts, ctx, manifest, src_dir, build_dir, inst_dir, args):
super(MakeBuilder, self).__init__(
build_opts, ctx, manifest, src_dir, build_dir, inst_dir
)
self.args = args or []
def _build(self, install_dirs, reconfigure):
env = self._compute_env(install_dirs)
# Need to ensure that PREFIX is set prior to install because
# libbpf uses it when generating its pkg-config file
cmd = (
["make", "-j%s" % self.build_opts.num_jobs]
+ self.args
+ ["PREFIX=" + self.inst_dir]
)
self._run_cmd(cmd, env=env)
install_cmd = ["make", "install"] + self.args + ["PREFIX=" + self.inst_dir]
self._run_cmd(install_cmd, env=env)
class AutoconfBuilder(BuilderBase):
def __init__(self, build_opts, ctx, manifest, src_dir, build_dir, inst_dir, args):
super(AutoconfBuilder, self).__init__(
build_opts, ctx, manifest, src_dir, build_dir, inst_dir
)
self.args = args or []
def _build(self, install_dirs, reconfigure):
configure_path = os.path.join(self.src_dir, "configure")
autogen_path = os.path.join(self.src_dir, "autogen.sh")
env = self._compute_env(install_dirs)
if not os.path.exists(configure_path):
print("%s doesn't exist, so reconfiguring" % configure_path)
# This libtoolize call is a bit gross; the issue is that
# `autoreconf` as invoked by libsodium's `autogen.sh` doesn't
# seem to realize that it should invoke libtoolize and then
# error out when the configure script references a libtool
# related symbol.
self._run_cmd(["libtoolize"], cwd=self.src_dir, env=env)
# We generally prefer to call the `autogen.sh` script provided
# by the project on the basis that it may know more than plain
# autoreconf does.
if os.path.exists(autogen_path):
self._run_cmd(["bash", autogen_path], cwd=self.src_dir, env=env)
else:
self._run_cmd(["autoreconf", "-ivf"], cwd=self.src_dir, env=env)
configure_cmd = [configure_path, "--prefix=" + self.inst_dir] + self.args
self._run_cmd(configure_cmd, env=env)
self._run_cmd(["make", "-j%s" % self.build_opts.num_jobs], env=env)
self._run_cmd(["make", "install"], env=env)
class Iproute2Builder(BuilderBase):
# ./configure --prefix does not work for iproute2.
# Thus, explicitly copy sources from src_dir to build_dir, build,
# and then install to inst_dir using DESTDIR
# lastly, also copy include from build_dir to inst_dir
def __init__(self, build_opts, ctx, manifest, src_dir, build_dir, inst_dir):
super(Iproute2Builder, self).__init__(
build_opts, ctx, manifest, src_dir, build_dir, inst_dir
)
def _patch(self):
# FBOSS build currently depends on an old version of iproute2 (commit
# 7ca63aef7d1b0c808da0040c6b366ef7a61f38c1). This is missing a commit
# (ae717baf15fb4d30749ada3948d9445892bac239) needed to build iproute2
# successfully. Apply it viz.: include stdint.h
# Reference: https://fburl.com/ilx9g5xm
with open(self.build_dir + "/tc/tc_core.c", "r") as f:
data = f.read()
with open(self.build_dir + "/tc/tc_core.c", "w") as f:
f.write("#include <stdint.h>\n")
f.write(data)
def _build(self, install_dirs, reconfigure):
configure_path = os.path.join(self.src_dir, "configure")
env = self.env.copy()
self._run_cmd([configure_path], env=env)
shutil.rmtree(self.build_dir)
shutil.copytree(self.src_dir, self.build_dir)
self._patch()
self._run_cmd(["make", "-j%s" % self.build_opts.num_jobs], env=env)
install_cmd = ["make", "install", "DESTDIR=" + self.inst_dir]
for d in ["include", "lib"]:
if not os.path.isdir(os.path.join(self.inst_dir, d)):
shutil.copytree(
os.path.join(self.build_dir, d), os.path.join(self.inst_dir, d)
)
self._run_cmd(install_cmd, env=env)
class CMakeBuilder(BuilderBase):
MANUAL_BUILD_SCRIPT = """\
#!{sys.executable}
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import subprocess
import sys
CMAKE = {cmake!r}
SRC_DIR = {src_dir!r}
BUILD_DIR = {build_dir!r}
INSTALL_DIR = {install_dir!r}
CMD_PREFIX = {cmd_prefix!r}
CMAKE_ENV = {env_str}
CMAKE_DEFINE_ARGS = {define_args_str}
def main():
ap = argparse.ArgumentParser()
ap.add_argument(
"cmake_args",
nargs=argparse.REMAINDER,
help='Any extra arguments after an "--" argument will be passed '
"directly to CMake."
)
ap.add_argument(
"--mode",
choices=["configure", "build", "install"],
default="configure",
help="The mode to run: configure, build, or install. "
"Defaults to configure",
)
ap.add_argument(
"--build",
action="store_const",
const="build",
dest="mode",
help="An alias for --mode=build",
)
ap.add_argument(
"--install",
action="store_const",
const="install",
dest="mode",
help="An alias for --mode=install",
)
args = ap.parse_args()
# Strip off a leading "--" from the additional CMake arguments
if args.cmake_args and args.cmake_args[0] == "--":
args.cmake_args = args.cmake_args[1:]
env = CMAKE_ENV
if args.mode == "configure":
full_cmd = CMD_PREFIX + [CMAKE, SRC_DIR] + CMAKE_DEFINE_ARGS + args.cmake_args
elif args.mode in ("build", "install"):
target = "all" if args.mode == "build" else "install"
full_cmd = CMD_PREFIX + [
CMAKE,
"--build",
BUILD_DIR,
"--target",
target,
"--config",
"Release",
] + args.cmake_args
else:
ap.error("unknown invocation mode: %s" % (args.mode,))
cmd_str = " ".join(full_cmd)
print("Running: %r" % (cmd_str,))
proc = subprocess.run(full_cmd, env=env, cwd=BUILD_DIR)
sys.exit(proc.returncode)
if __name__ == "__main__":
main()
"""
def __init__(
self, build_opts, ctx, manifest, src_dir, build_dir, inst_dir, defines
):
super(CMakeBuilder, self).__init__(
build_opts, ctx, manifest, src_dir, build_dir, inst_dir
)
self.defines = defines or {}
def _invalidate_cache(self):
for name in ["CMakeCache.txt", "CMakeFiles"]:
name = os.path.join(self.build_dir, name)
if os.path.isdir(name):
shutil.rmtree(name)
elif os.path.exists(name):
os.unlink(name)
def _needs_reconfigure(self):
for name in ["CMakeCache.txt", "build.ninja"]:
name = os.path.join(self.build_dir, name)
if not os.path.exists(name):
return True
return False
def _write_build_script(self, **kwargs):
env_lines = [" {!r}: {!r},".format(k, v) for k, v in kwargs["env"].items()]
kwargs["env_str"] = "\n".join(["{"] + env_lines + ["}"])
define_arg_lines = ["["]
for arg in kwargs["define_args"]:
# Replace the CMAKE_INSTALL_PREFIX argument to use the INSTALL_DIR
# variable that we define in the MANUAL_BUILD_SCRIPT code.
if arg.startswith("-DCMAKE_INSTALL_PREFIX="):
value = " {!r}.format(INSTALL_DIR),".format(
"-DCMAKE_INSTALL_PREFIX={}"
)
else:
value = " {!r},".format(arg)
define_arg_lines.append(value)
define_arg_lines.append("]")
kwargs["define_args_str"] = "\n".join(define_arg_lines)
# In order to make it easier for developers to manually run builds for
# CMake-based projects, write out some build scripts that can be used to invoke
# CMake manually.
build_script_path = os.path.join(self.build_dir, "run_cmake.py")
script_contents = self.MANUAL_BUILD_SCRIPT.format(**kwargs)
with open(build_script_path, "w") as f:
f.write(script_contents)
os.chmod(build_script_path, 0o755)
def _compute_cmake_define_args(self, env):
defines = {
"CMAKE_INSTALL_PREFIX": self.inst_dir,
"BUILD_SHARED_LIBS": "OFF",
# Some of the deps (rsocket) default to UBSAN enabled if left
# unspecified. Some of the deps fail to compile in release mode
# due to warning->error promotion. RelWithDebInfo is the happy
# medium.
"CMAKE_BUILD_TYPE": "RelWithDebInfo",
}
if "SANDCASTLE" not in os.environ:
# We sometimes see intermittent ccache related breakages on some
# of the FB internal CI hosts, so we prefer to disable ccache
# when running in that environment.
ccache = path_search(env, "ccache")
if ccache:
defines["CMAKE_CXX_COMPILER_LAUNCHER"] = ccache
if "GITHUB_ACTIONS" in os.environ and self.build_opts.is_windows():
# GitHub actions: the host has both gcc and msvc installed, and
# the default behavior of cmake is to prefer gcc.
# Instruct cmake that we want it to use cl.exe; this is important
# because Boost prefers cl.exe and the mismatch results in cmake
# with gcc not being able to find boost built with cl.exe.
defines["CMAKE_C_COMPILER"] = "cl.exe"
defines["CMAKE_CXX_COMPILER"] = "cl.exe"
if self.build_opts.is_darwin():
# Try to persuade cmake to set the rpath to match the lib
# dirs of the dependencies. This isn't automatic, and to
# make things more interesting, cmake uses `;` as the path
# separator, so translate the runtime path to something
# that cmake will parse
defines["CMAKE_INSTALL_RPATH"] = ";".join(
env.get("DYLD_LIBRARY_PATH", "").split(":")
)
# Tell cmake that we want to set the rpath in the tree
# at build time. Without this the rpath is only set
# at the moment that the binaries are installed. That
# default is problematic for example when using the
# gtest integration in cmake which runs the built test
# executables during the build to discover the set of
# tests.
defines["CMAKE_BUILD_WITH_INSTALL_RPATH"] = "ON"
defines.update(self.defines)
define_args = ["-D%s=%s" % (k, v) for (k, v) in defines.items()]
# if self.build_opts.is_windows():
# define_args += ["-G", "Visual Studio 15 2017 Win64"]
define_args += ["-G", "Ninja"]
return define_args
def _build(self, install_dirs, reconfigure):
reconfigure = reconfigure or self._needs_reconfigure()
env = self._compute_env(install_dirs)
# Resolve the cmake that we installed
cmake = path_search(env, "cmake")
if cmake is None:
raise Exception("Failed to find CMake")
if reconfigure:
define_args = self._compute_cmake_define_args(env)
self._write_build_script(
cmd_prefix=self._get_cmd_prefix(),
cmake=cmake,
env=env,
define_args=define_args,
src_dir=self.src_dir,
build_dir=self.build_dir,
install_dir=self.inst_dir,
sys=sys,
)
self._invalidate_cache()
self._run_cmd([cmake, self.src_dir] + define_args, env=env)
self._run_cmd(
[
cmake,
"--build",
self.build_dir,
"--target",
"install",
"--config",
"Release",
"-j",
str(self.build_opts.num_jobs),
],
env=env,
)
def run_tests(self, install_dirs, schedule_type, owner):
env = self._compute_env(install_dirs)
ctest = path_search(env, "ctest")
cmake = path_search(env, "cmake")
def get_property(test, propname, defval=None):
""" extracts a named property from a cmake test info json blob.
The properties look like:
[{"name": "WORKING_DIRECTORY"},
{"value": "something"}]
We assume that it is invalid for the same named property to be
listed more than once.
"""
props = test.get("properties", [])
for p in props:
if p.get("name", None) == propname:
return p.get("value", defval)
return defval
def list_tests():
output = subprocess.check_output(
[ctest, "--show-only=json-v1"], env=env, cwd=self.build_dir
)
try:
data = json.loads(output.decode("utf-8"))
except ValueError as exc:
raise Exception(
"Failed to decode cmake test info using %s: %s. Output was: %r"
% (ctest, str(exc), output)
)
tests = []
machine_suffix = self.build_opts.host_type.as_tuple_string()
for test in data["tests"]:
working_dir = get_property(test, "WORKING_DIRECTORY")
command = test["command"]
if working_dir:
command = [cmake, "-E", "chdir", working_dir] + command
tests.append(
{
"type": "custom",
"target": "%s-%s-getdeps-%s"
% (self.manifest.name, test["name"], machine_suffix),
"command": command,
}
)
return tests
testpilot = path_search(env, "testpilot")
if testpilot:
buck_test_info = list_tests()
buck_test_info_name = os.path.join(self.build_dir, ".buck-test-info.json")
with open(buck_test_info_name, "w") as f:
json.dump(buck_test_info, f)
env.set("http_proxy", "")
env.set("https_proxy", "")
machine_suffix = self.build_opts.host_type.as_tuple_string()
runs = []
testpilot_args = [
testpilot,
# Need to force the repo type otherwise testpilot on windows
# can be confused (presumably sparse profile related)
"--force-repo",
"fbcode",
"--force-repo-root",
self.build_opts.fbsource_dir,
"--buck-test-info",
buck_test_info_name,
"--test-config",
"platform=%s" % machine_suffix,
"buildsystem=getdeps",
]
if owner:
testpilot_args += ["--contacts", owner]
if schedule_type == "continuous":
runs.append(
[
"--tag-new-tests",
"--collection",
"oss-continuous",
"--purpose",
"continuous",
]
)
elif schedule_type == "testwarden":
# One run to assess new tests
runs.append(
[
"--tag-new-tests",
"--collection",
"oss-new-test-stress",
"--stress-runs",
"10",
"--purpose",
"stress-run-new-test",
]
)
# And another for existing tests
runs.append(
[
"--tag-new-tests",
"--collection",
"oss-existing-test-stress",
"--stress-runs",
"10",
"--purpose",
"stress-run",
]
)
else:
runs.append(["--collection", "oss-diff", "--purpose", "diff"])
for run in runs:
self._run_cmd(
testpilot_args + run,
cwd=self.build_opts.fbcode_builder_dir,
env=env,
)
else:
self._run_cmd(
[ctest, "--output-on-failure", "-j", str(self.build_opts.num_jobs)],
env=env,
)
class NinjaBootstrap(BuilderBase):
def __init__(self, build_opts, ctx, manifest, build_dir, src_dir, inst_dir):
super(NinjaBootstrap, self).__init__(
build_opts, ctx, manifest, src_dir, build_dir, inst_dir
)
def _build(self, install_dirs, reconfigure):
self._run_cmd([sys.executable, "configure.py", "--bootstrap"], cwd=self.src_dir)
src_ninja = os.path.join(self.src_dir, "ninja")
dest_ninja = os.path.join(self.inst_dir, "bin/ninja")
bin_dir = os.path.dirname(dest_ninja)
if not os.path.exists(bin_dir):
os.makedirs(bin_dir)
shutil.copyfile(src_ninja, dest_ninja)
shutil.copymode(src_ninja, dest_ninja)
class OpenSSLBuilder(BuilderBase):
def __init__(self, build_opts, ctx, manifest, build_dir, src_dir, inst_dir):
super(OpenSSLBuilder, self).__init__(
build_opts, ctx, manifest, src_dir, build_dir, inst_dir
)
def _build(self, install_dirs, reconfigure):
configure = os.path.join(self.src_dir, "Configure")
# prefer to resolve the perl that we installed from
# our manifest on windows, but fall back to the system
# path on eg: darwin
env = self.env.copy()
for d in install_dirs:
bindir = os.path.join(d, "bin")
add_path_entry(env, "PATH", bindir, append=False)
perl = path_search(env, "perl", "perl")
if self.build_opts.is_windows():
make = "nmake.exe"
args = ["VC-WIN64A-masm", "-utf-8"]
elif self.build_opts.is_darwin():
make = "make"
args = ["darwin64-x86_64-cc"]
else:
raise Exception("don't know how to build openssl for %r" % self.ctx)
self._run_cmd(
[
perl,
configure,
"--prefix=%s" % self.inst_dir,
"--openssldir=%s" % self.inst_dir,
]
+ args
+ [
"enable-static-engine",
"enable-capieng",
"no-makedepend",
"no-unit-test",
"no-tests",
]
)
self._run_cmd([make, "install_sw", "install_ssldirs"])
class Boost(BuilderBase):
def __init__(
self, build_opts, ctx, manifest, src_dir, build_dir, inst_dir, b2_args
):
children = os.listdir(src_dir)
assert len(children) == 1, "expected a single directory entry: %r" % (children,)
boost_src = children[0]
assert boost_src.startswith("boost")
src_dir = os.path.join(src_dir, children[0])
super(Boost, self).__init__(
build_opts, ctx, manifest, src_dir, build_dir, inst_dir
)
self.b2_args = b2_args
def _build(self, install_dirs, reconfigure):
linkage = ["static"]
if self.build_opts.is_windows():
linkage.append("shared")
for link in linkage:
args = []
if self.build_opts.is_windows():
bootstrap = os.path.join(self.src_dir, "bootstrap.bat")
self._run_cmd([bootstrap], cwd=self.src_dir)
args += ["address-model=64"]
else:
bootstrap = os.path.join(self.src_dir, "bootstrap.sh")
self._run_cmd(
[bootstrap, "--prefix=%s" % self.inst_dir], cwd=self.src_dir
)
b2 = os.path.join(self.src_dir, "b2")
self._run_cmd(
[
b2,
"-j%s" % self.build_opts.num_jobs,
"--prefix=%s" % self.inst_dir,
"--builddir=%s" % self.build_dir,
]
+ args
+ self.b2_args
+ [
"link=%s" % link,
"runtime-link=shared",
"variant=release",
"threading=multi",
"debug-symbols=on",
"visibility=global",
"-d2",
"install",
],
cwd=self.src_dir,
)
class NopBuilder(BuilderBase):
def __init__(self, build_opts, ctx, manifest, src_dir, inst_dir):
super(NopBuilder, self).__init__(
build_opts, ctx, manifest, src_dir, None, inst_dir
)
def build(self, install_dirs, reconfigure):
print("Installing %s -> %s" % (self.src_dir, self.inst_dir))
parent = os.path.dirname(self.inst_dir)
if not os.path.exists(parent):
os.makedirs(parent)
install_files = self.manifest.get_section_as_ordered_pairs(
"install.files", self.ctx
)
if install_files:
for src_name, dest_name in self.manifest.get_section_as_ordered_pairs(
"install.files", self.ctx
):
full_dest = os.path.join(self.inst_dir, dest_name)
full_src = os.path.join(self.src_dir, src_name)
dest_parent = os.path.dirname(full_dest)
if not os.path.exists(dest_parent):
os.makedirs(dest_parent)
if os.path.isdir(full_src):
if not os.path.exists(full_dest):
shutil.copytree(full_src, full_dest)
else:
shutil.copyfile(full_src, full_dest)
shutil.copymode(full_src, full_dest)
# This is a bit gross, but the mac ninja.zip doesn't
# give ninja execute permissions, so force them on
# for things that look like they live in a bin dir
if os.path.dirname(dest_name) == "bin":
st = os.lstat(full_dest)
os.chmod(full_dest, st.st_mode | stat.S_IXUSR)
else:
if not os.path.exists(self.inst_dir):
shutil.copytree(self.src_dir, self.inst_dir)
class SqliteBuilder(BuilderBase):
def __init__(self, build_opts, ctx, manifest, src_dir, build_dir, inst_dir):
super(SqliteBuilder, self).__init__(
build_opts, ctx, manifest, src_dir, build_dir, inst_dir
)
def _build(self, install_dirs, reconfigure):
for f in ["sqlite3.c", "sqlite3.h", "sqlite3ext.h"]:
src = os.path.join(self.src_dir, f)
dest = os.path.join(self.build_dir, f)
copy_if_different(src, dest)
cmake_lists = """
cmake_minimum_required(VERSION 3.1.3 FATAL_ERROR)
project(sqlite3 C)
add_library(sqlite3 STATIC sqlite3.c)
# These options are taken from the defaults in Makefile.msc in
# the sqlite distribution
target_compile_definitions(sqlite3 PRIVATE
-DSQLITE_ENABLE_COLUMN_METADATA=1
-DSQLITE_ENABLE_FTS3=1
-DSQLITE_ENABLE_RTREE=1
-DSQLITE_ENABLE_GEOPOLY=1
-DSQLITE_ENABLE_JSON1=1
-DSQLITE_ENABLE_STMTVTAB=1
-DSQLITE_ENABLE_DBPAGE_VTAB=1
-DSQLITE_ENABLE_DBSTAT_VTAB=1
-DSQLITE_INTROSPECTION_PRAGMAS=1
-DSQLITE_ENABLE_DESERIALIZE=1
)
install(TARGETS sqlite3)
install(FILES sqlite3.h sqlite3ext.h DESTINATION include)
"""
with open(os.path.join(self.build_dir, "CMakeLists.txt"), "w") as f:
f.write(cmake_lists)
defines = {
"CMAKE_INSTALL_PREFIX": self.inst_dir,
"BUILD_SHARED_LIBS": "OFF",
"CMAKE_BUILD_TYPE": "RelWithDebInfo",
}
define_args = ["-D%s=%s" % (k, v) for (k, v) in defines.items()]
define_args += ["-G", "Ninja"]
env = self._compute_env(install_dirs)
# Resolve the cmake that we installed
cmake = path_search(env, "cmake")
self._run_cmd([cmake, self.build_dir] + define_args, env=env)
self._run_cmd(
[
cmake,
"--build",
self.build_dir,
"--target",
"install",
"--config",
"Release",
"-j",
str(self.build_opts.num_jobs),
],
env=env,
)
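class EchoBuilder(BuilderBase):
    """Illustrative-only subclass (not part of getdeps) showing the minimal
    _build contract: compute the env from the dependency install dirs, then
    drive the tool through _run_cmd."""

    def _build(self, install_dirs, reconfigure):
        env = self._compute_env(install_dirs)
        self._run_cmd(["make", "-j%s" % self.build_opts.num_jobs], env=env)
        self._run_cmd(["make", "install", "PREFIX=" + self.inst_dir], env=env)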
| [
"[email protected]"
] | |
31ac22e94a6cc01be3588a743a788ebfcbbad6af | 5df1c192f8a74e8ca28792c2325239b78c22653a | /CashFlows/EaganJones/admin.py | 88d242badf9726cb54b20eeed29e4ed1017f0e9c | [] | no_license | knkemree/10_k_cash_flows | 8afe1e7463b9970f53624ef47660f06ea3052478 | 67b9572f3fdab799ff8d84c157f6bd28f0d7b69a | refs/heads/master | 2022-12-12T16:36:42.760573 | 2020-02-17T02:30:38 | 2020-02-17T02:34:46 | 241,003,252 | 0 | 0 | null | 2022-12-08T04:26:50 | 2020-02-17T02:26:05 | Python | UTF-8 | Python | false | false | 490 | py | from django.contrib import admin
from EaganJones.models import Companies, UserProfile
# Register your models here.
class CompaniesAdmin(admin.ModelAdmin):
list_display = ['companyname', 'cik', 'primarysymbol','created_at' ]
list_filter = ['companyname', 'cik', 'primarysymbol']
search_fields = ('companyname', 'cik', 'primarysymbol')
list_display_links = ['primarysymbol', 'companyname', ]
admin.site.register(Companies, CompaniesAdmin)
admin.site.register(UserProfile) | [
"[email protected]"
] | |
14914a3f9661f3552078415483c023d57ca7cde4 | 9ff696839d88998451f2cb2725a0051ef8642dc0 | /karen_test_16760/settings.py | bc1feac08c06ffbf74471bbe35ebf121cd7c16f7 | [] | no_license | crowdbotics-apps/karen-test-16760 | f67aacf4d07d10c70c4edf77a428dd8e12b4acf7 | 02492b4531be9561f1a046176918560e248764df | refs/heads/master | 2023-05-19T10:52:20.454231 | 2020-05-08T17:14:02 | 2020-05-08T17:14:02 | 262,379,844 | 0 | 0 | null | 2021-06-11T07:19:46 | 2020-05-08T16:58:28 | Python | UTF-8 | Python | false | false | 5,502 | py | """
Django settings for karen_test_16760 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sites",
"healthcare",
]
LOCAL_APPS = [
"home",
"users.apps.UsersConfig",
]
THIRD_PARTY_APPS = [
"rest_framework",
"rest_framework.authtoken",
"rest_auth",
"rest_auth.registration",
"bootstrap4",
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.google",
"django_extensions",
"drf_yasg",
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "karen_test_16760.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "karen_test_16760.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {"default": env.db()}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
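# Illustrative .env values consumed by the env.* reads above (sample values,
# not real credentials):
#   SECRET_KEY=change-me
#   DEBUG=on
#   DATABASE_URL=postgres://user:pass@localhost:5432/app
#   SENDGRID_USERNAME=apikey
#   SENDGRID_PASSWORD=sg-secret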
| [
"[email protected]"
] | |
070ac8d6e82a97d09acf77b94879182cf0be41cb | 5b5a49643c75aa43d5a876608383bc825ae1e147 | /python99/btree/p407.py | da50d0cdce94d8e84f544db6b6ac592ec01b54af | [] | no_license | rscai/python99 | 281d00473c0dc977f58ba7511c5bcb6f38275771 | 3fa0cb7683ec8223259410fb6ea2967e3d0e6f61 | refs/heads/master | 2020-04-12T09:08:49.500799 | 2019-10-06T07:47:17 | 2019-10-06T07:47:17 | 162,393,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 868 | py | from python99.btree.p406 import hbal_tree
def minNodes(h):
if h == 0:
return 0
if h == 1:
return 1
return 1 + minNodes(h-1) + minNodes(h-2)
def minHeight(n):
if n == 0:
return 0
return 1 + minHeight(n//2)
def maxHeight(n):
if n == 1:
return 1
if n == 0:
return 0
if n == 2:
return 2
if n == 3:
return 2
for hLeft in range(1, n):
nLeft = minNodes(hLeft)
hRight = maxHeight(n-1-nLeft)
if hLeft == hRight + 1 or hLeft == hRight:
return 1 + max(hLeft, hRight)
def nodes(t):
if t is None:
return 0
return 1 + nodes(t[1])+nodes(t[2])
def hbal_tree_nodes(n):
trees = []
for h in range(minHeight(n), maxHeight(n)+1):
trees = trees + hbal_tree(h)
return [tree for tree in trees if nodes(tree) == n]
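if __name__ == '__main__':
    # Worked check of the bounds above: minNodes follows the Fibonacci-like
    # recurrence 1 + minNodes(h-1) + minNodes(h-2), giving minNodes(3) == 4
    # and minNodes(4) == 7, so a 7-node height-balanced tree has height 3 or 4.
    assert minNodes(3) == 4 and minNodes(4) == 7
    assert minHeight(7) == 3 and maxHeight(7) == 4
    print(len(hbal_tree_nodes(7)))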
| [
"[email protected]"
] | |
50b6c92b3e7d711f35807917787562435c2e7db1 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p4VQE/R3/benchmark/startQiskit55.py | dd4ea1965707a6f1dc5b831240f39bf9ec9dc38c | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,406 | py | # qubit number=3
# total number=10
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.cx(input_qubit[0],input_qubit[2]) # number=7
prog.x(input_qubit[2]) # number=8
prog.cx(input_qubit[0],input_qubit[2]) # number=9
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=5
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot = 5600
writefile = open("../data/startQiskit55.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('qasm_simulator')
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
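    # Illustrative post-processing (not in the original benchmark): the most
    # frequently measured bitstring approximates a good MaxCut assignment.
    best = max(info, key=info.get)
    print("most frequent bitstring:", best)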
| [
"[email protected]"
] | |
b950004786326937d3e02f09bde3a4eab88542c6 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_daintier.py | 797ea89f17b1b10c07d5c3e288ebdb54becd1b3f | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py |
# class header
class _DAINTIER():
def __init__(self,):
self.name = "DAINTIER"
self.definitions = ['dainty']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['dainty']
| [
"[email protected]"
] | |
75ec16d22ef5a3b1135af39f7205659c506e22b6 | 59f0fde411ca668b874fa6fa6001069b9146f596 | /src/blog/views.py | 078fc25eea043597782a68e56aaa8c171eac5a36 | [] | no_license | nabilatajrin/django-blog-application | 4c256755fc31b41f609b44a5329fb128d46c5fa1 | 7971f8f7d8b3b442fbd4530bc0f32dff7865adcc | refs/heads/master | 2020-12-06T16:09:00.310415 | 2020-11-03T05:37:34 | 2020-11-03T05:37:34 | 232,503,248 | 0 | 0 | null | 2020-01-08T07:24:39 | 2020-01-08T07:19:38 | Python | UTF-8 | Python | false | false | 405 | py | from django.http import Http404
from django.shortcuts import render, get_object_or_404
from .models import BlogPost
# Create your views here.
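# Hypothetical URLconf entry this view assumes (route name and pattern are not
# part of this file):
#     path("blog/<slug:slug>/", blog_post_detail_page, name="blog_post_detail")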
def blog_post_detail_page(request, slug):
#obj = BlogPost.objects.get(id=post_id)
obj = get_object_or_404(BlogPost, slug=slug)
template_name = 'blog_post_detail.html'
context = {"object": obj}
return render(request, template_name, context) | [
"[email protected]"
] | |
025a0f5190bc25975c5983890d97929e6c2e5122 | 3f763cf893b09a3be562858613c928703ff349e4 | /client/verta/verta/_swagger/_public/modeldb/model/ModeldbUpdateExperimentName.py | d45dee38182c3e1e4059f61548f9737e49feaa7f | [
"Apache-2.0"
] | permissive | VertaAI/modeldb | 636e46fc025b01a514d599b10e228c8735503357 | ec9ac7712500adb13fd815dfd476ce9f536c6921 | refs/heads/main | 2023-08-31T00:45:37.220628 | 2023-08-30T18:45:13 | 2023-08-30T18:45:13 | 71,305,435 | 844 | 142 | Apache-2.0 | 2023-09-14T19:24:13 | 2016-10-19T01:07:26 | Java | UTF-8 | Python | false | false | 649 | py | # THIS FILE IS AUTO-GENERATED. DO NOT EDIT
from verta._swagger.base_type import BaseType
class ModeldbUpdateExperimentName(BaseType):
def __init__(self, id=None, name=None):
required = {
"id": False,
"name": False,
}
self.id = id
self.name = name
for k, v in required.items():
if self[k] is None and v:
raise ValueError('attribute {} is required'.format(k))
@staticmethod
def from_json(d):
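    # For plain string fields the generated block below is a pass-through;
    # nested message types would instead be decoded with their own from_json().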
tmp = d.get('id', None)
if tmp is not None:
d['id'] = tmp
tmp = d.get('name', None)
if tmp is not None:
d['name'] = tmp
return ModeldbUpdateExperimentName(**d)
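# Minimal usage sketch (hypothetical values):
#   msg = ModeldbUpdateExperimentName.from_json({"id": "exp-123", "name": "renamed"})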
| [
"[email protected]"
] | |
dfc5c38afdf5ed2e69dd273efbbb072ffdb7031a | cb82ff3240e4367902d8169c60444a6aa019ffb6 | /python2.7/site-packages/twisted/trial/test/test_output.py | 54d020e3effbc0cf81825676e1a3765cc5514a4a | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-free-unknown",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft"
] | permissive | icepaule/sslstrip-hsts-openwrt | 0a8097af96beef4c48fcfa0b858367829b4dffdc | d5a2a18d2ac1cdb9a64fbf47235b87b9ebc24536 | refs/heads/master | 2020-12-31T10:22:04.618312 | 2020-02-07T18:39:08 | 2020-02-07T18:39:08 | 238,998,188 | 0 | 0 | MIT | 2020-02-07T18:35:59 | 2020-02-07T18:35:58 | null | UTF-8 | Python | false | false | 5,035 | py | from twisted.scripts import trial
from twisted.trial import runner
from twisted.trial.test import packages
import os, sys, StringIO
def runTrial(*args):
from twisted.trial import reporter
config = trial.Options()
config.parseOptions(args)
output = StringIO.StringIO()
myRunner = runner.TrialRunner(
reporter.VerboseTextReporter,
stream=output,
workingDirectory=config['temp-directory'])
suite = trial._getSuite(config)
result = myRunner.run(suite)
return output.getvalue()
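# runTrial() drives a full TrialRunner pass in-process and returns everything
# the reporter wrote, so the tests below can assert on trial's console output.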
class TestImportErrors(packages.SysPathManglingTest):
"""Actually run trial as if on the command line and check that the output
is what we expect.
"""
debug = False
parent = "_testImportErrors"
def runTrial(self, *args):
return runTrial('--temp-directory', self.mktemp(), *args)
def _print(self, stuff):
print stuff
return stuff
def failUnlessIn(self, container, containee, *args, **kwargs):
# redefined to be useful in callbacks
super(TestImportErrors, self).failUnlessIn(
containee, container, *args, **kwargs)
return container
def failIfIn(self, container, containee, *args, **kwargs):
# redefined to be useful in callbacks
super(TestImportErrors, self).failIfIn(
containee, container, *args, **kwargs)
return container
def test_trialRun(self):
self.runTrial()
def test_nonexistentModule(self):
d = self.runTrial('twisted.doesntexist')
self.failUnlessIn(d, '[ERROR]')
self.failUnlessIn(d, 'twisted.doesntexist')
return d
def test_nonexistentPackage(self):
d = self.runTrial('doesntexist')
self.failUnlessIn(d, 'doesntexist')
self.failUnlessIn(d, 'ValueError')
self.failUnlessIn(d, '[ERROR]')
return d
def test_nonexistentPackageWithModule(self):
d = self.runTrial('doesntexist.barney')
self.failUnlessIn(d, 'doesntexist.barney')
self.failUnlessIn(d, 'ValueError')
self.failUnlessIn(d, '[ERROR]')
return d
def test_badpackage(self):
d = self.runTrial('badpackage')
self.failUnlessIn(d, '[ERROR]')
self.failUnlessIn(d, 'badpackage')
self.failIfIn(d, 'IOError')
return d
def test_moduleInBadpackage(self):
d = self.runTrial('badpackage.test_module')
self.failUnlessIn(d, "[ERROR]")
self.failUnlessIn(d, "badpackage.test_module")
self.failIfIn(d, 'IOError')
return d
def test_badmodule(self):
d = self.runTrial('package.test_bad_module')
self.failUnlessIn(d, '[ERROR]')
self.failUnlessIn(d, 'package.test_bad_module')
self.failIfIn(d, 'IOError')
self.failIfIn(d, '<module ')
return d
def test_badimport(self):
d = self.runTrial('package.test_import_module')
self.failUnlessIn(d, '[ERROR]')
self.failUnlessIn(d, 'package.test_import_module')
self.failIfIn(d, 'IOError')
self.failIfIn(d, '<module ')
return d
def test_recurseImport(self):
d = self.runTrial('package')
self.failUnlessIn(d, '[ERROR]')
self.failUnlessIn(d, 'test_bad_module')
self.failUnlessIn(d, 'test_import_module')
self.failIfIn(d, '<module ')
self.failIfIn(d, 'IOError')
return d
def test_recurseImportErrors(self):
d = self.runTrial('package2')
self.failUnlessIn(d, '[ERROR]')
self.failUnlessIn(d, 'package2')
self.failUnlessIn(d, 'test_module')
self.failUnlessIn(d, "No module named frotz")
self.failIfIn(d, '<module ')
self.failIfIn(d, 'IOError')
return d
def test_nonRecurseImportErrors(self):
d = self.runTrial('-N', 'package2')
self.failUnlessIn(d, '[ERROR]')
self.failUnlessIn(d, "No module named frotz")
self.failIfIn(d, '<module ')
return d
def test_regularRun(self):
d = self.runTrial('package.test_module')
self.failIfIn(d, '[ERROR]')
self.failIfIn(d, 'IOError')
self.failUnlessIn(d, 'OK')
self.failUnlessIn(d, 'PASSED (successes=1)')
return d
def test_filename(self):
self.mangleSysPath(self.oldPath)
d = self.runTrial(
os.path.join(self.parent, 'package', 'test_module.py'))
self.failIfIn(d, '[ERROR]')
self.failIfIn(d, 'IOError')
self.failUnlessIn(d, 'OK')
self.failUnlessIn(d, 'PASSED (successes=1)')
return d
def test_dosFile(self):
## XXX -- not really an output test, more of a script test
self.mangleSysPath(self.oldPath)
d = self.runTrial(
os.path.join(self.parent,
'package', 'test_dos_module.py'))
self.failIfIn(d, '[ERROR]')
self.failIfIn(d, 'IOError')
self.failUnlessIn(d, 'OK')
self.failUnlessIn(d, 'PASSED (successes=1)')
return d
| [
"[email protected]"
] | |
8215bc9fad745b0ac2dd15c8624629671f8bf65b | 280f650e91c675f471121b8a4a13c2bb5a0a5e6c | /apps/accounts/api/urls.py | 64f242fd26e5d7c4bcebeedcb6344c01e688b9d3 | [] | no_license | navill/2-1_Project_repo | a8e089c657e44034152df30a85220675f2c31084 | 3f62bca9f52799d9f877f2d01259bb51038c0cc4 | refs/heads/master | 2022-12-31T18:32:45.471261 | 2020-10-26T10:54:39 | 2020-10-26T10:54:39 | 303,907,042 | 0 | 0 | null | 2020-10-26T10:54:40 | 2020-10-14T04:57:03 | Python | UTF-8 | Python | false | false | 1,127 | py | from django.urls import path
from accounts.api.views import *
app_name = 'accounts_api'
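# With app_name set, these routes reverse through the namespace, e.g.
# reverse("accounts_api:detail_normal", kwargs={"pk": 1}); this usage sketch
# assumes the project-level include() prefix.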
urlpatterns = [
    path('normal/list/', NormalUserListView.as_view(), name='list_normal'),
    path('normal/create/', NormalUserCreateView.as_view(), name='create_normal'),
    path('normal/detail/<int:pk>/', NormalUserRetrieveUpdateView.as_view(), name='detail_normal'),
    path('normal/delete/<int:pk>/', NormalUserDestroyView.as_view(), name='delete_normal'),
    path('staff/list/', StaffUserListView.as_view(), name='list_staff'),
    path('staff/create/', StaffUserCreateView.as_view(), name='create_staff'),
    path('staff/detail/<int:pk>/', StaffUserRetrieveUpdateView.as_view(), name='detail_staff'),
    path('staff/delete/<int:pk>/', StaffUserDestroyView.as_view(), name='delete_staff'),
    path('admin/list/', AdminUserListView.as_view(), name='list_admin'),
    path('admin/create/', AdminUserCreateView.as_view(), name='create_admin'),
    path('admin/detail/<int:pk>/', AdminUserRetrieveUpdateView.as_view(), name='detail_admin'),
    path('admin/delete/<int:pk>/', AdminUserDestroyView.as_view(), name='delete_admin'),
] | [
"[email protected]"
] | |
d7a58b949717c911b6ddcaa34b4330c57ceafe7c | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Link/Link Checker 9.3/third_party/dnspython/dns/rdtypes/ANY/SOA.py | b971c0c12b6b4456101ae18551f46c6025bd705f | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:254fa89eeee239b6fca6b963a4fb01a1a04b6b3c100590811f64124951b90bf5
size 5163
| [
"[email protected]"
] | |
9e2e87721220eb459a320725b72aec702b32a6cf | 0866dd2461a544b43379c8235f5e25c9bf856b72 | /backend/manage.py | 84af162d55f1b74560810ba78d5d9ba2e012516d | [] | no_license | crowdbotics-apps/first-22732 | b63efaa93d05251138e8d9bfdf54aeb55568317f | 194c14a4d1cdff4524d95c7d749110987b6b4750 | refs/heads/master | 2023-01-13T11:16:27.752681 | 2020-11-18T18:38:14 | 2020-11-18T18:38:14 | 314,024,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 631 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'first_22732.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
6082c431e80504897d1bb223171e7ad60d340077 | 11812a0cc7b818292e601ecdd4aa4c4e03d131c5 | /100days_of_python/day8/exercise_2_Prime_Number.py | 54bb65b8d27fd94adc31ace5dc872a9dd2c434f8 | [] | no_license | SunshineFaxixi/Python_Learning | f1e55adcfa898489cc9146ccfb220f0b48a31a22 | ab3ca44d013311b6de02124091acc4c36a83c4d9 | refs/heads/master | 2021-08-16T05:47:29.963118 | 2021-01-04T13:48:30 | 2021-01-04T13:48:30 | 238,857,341 | 1 | 0 | null | 2020-03-03T13:53:08 | 2020-02-07T06:21:46 | HTML | UTF-8 | Python | false | false | 510 | py | def prime_checker(number):
    # for/else: the else branch runs only when the loop finds no divisor
    # (inputs below 2 are not validated, matching the exercise scope)
    for i in range(2, number):
        if number % i == 0:
            print("It is not a prime number")
            break
    else:
        print("It's a prime number")
def prime_checker_1(number):
is_prime = True
for i in range(2, number):
if number % i == 0:
            is_prime = False
            break  # one divisor is enough; stop scanning
if is_prime:
print("It's a prime number")
else:
print("It is not a prime number")
n = int(input("Check this number: "))
prime_checker_1(number = n) | [
"[email protected]"
] | |
f209d11a13a1fb8de3897a181d5a30c1491e566a | a214e706c875e0af7221c0c9ae193d9d93ee20a7 | /reap_admixture_pedmap.py | f737dff6b82939a2075e48ee932aed835b588014 | [] | no_license | inambioinfo/bioinformatics_scripts | fa2292e91ad4134204a09ace27c8a91ae70fa34c | 3a23611f382b7f3dd60e5e2abe841b84408c0d44 | refs/heads/master | 2020-03-20T21:17:10.163061 | 2017-03-28T23:41:39 | 2017-03-28T23:41:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,456 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from optparse import OptionParser
import os
__author__ = "Raony Guimarães"
__copyright__ = "Copyright 2012, Filter Analysis"
__credits__ = ["Raony Guimarães"]
__license__ = "GPL"
__version__ = "1.0.1"
__maintainer__ = "Raony Guimarães"
__email__ = "[email protected]"
__status__ = "Production"
#run example
#python gatk.py -i alignment/exome.sorted.bam
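# Pipeline overview (tool paths below are site-specific installs):
#   1. merge the input PED/MAP with 1000 Genomes CEU/YRI/AMR reference samples
#   2. PLINK --recode12 / --transpose to produce 1/2-coded TPED/TFAM files
#   3. ADMIXTURE with K=3 ancestral populations
#   4. REAP kinship estimation from the admixture proportions and frequencies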
parser = OptionParser()
parser.add_option("-p", dest="ped",
help="PED File", metavar="pedfile")
parser.add_option("-m", dest="map",
help="MAP File", metavar="mapfile")
(options, args) = parser.parse_args()
pedfilename = ".".join(options.ped.split("/")[-1].split(".")[:-1])
mapfile = ".".join(options.map.split("/")[-1].split(".")[:-1])
plink_dir = '/projects/relatedness/plink-1.07-x86_64'
print 'merge with ped map from 1000genomes'
command = 'python /lgc/scripts/merge_pedmap.py -p %s -q /projects/1000genomes/integrated_call_sets/1092exomes/1092exomes_sureselect.ped -o %s_merged_ceu_yri_amr' % (pedfilename, pedfilename)
os.system(command)
print 'recode12'
filename2 = pedfilename+'_merged_ceu_yri_amr_12'
command = "/projects/relatedness/plink-1.07-x86_64/plink --file %s_merged_ceu_yri_amr --recode12 --output-missing-genotype 0 --missing --noweb --out %s" % (pedfilename, filename2)
os.system(command)
print 'tped'
command = '%s/plink --file %s --recode12 --output-missing-genotype 0 --transpose --out %s --noweb' % (plink_dir, filename2, filename2)
os.system(command)
# print command
print 'running admixture'
command = '/lgc/programs/admixture_linux-1.22/admixture %s.ped 3' % (filename2)
os.system(command)
#prep files for REAP
command = '''cut -d ' ' -f1,2 %s_merged_ceu_yri_amr.ped > myid.txt''' % (pedfilename)
os.system(command)
command = 'paste myid.txt %s.3.Q > admixturedata.proportions' % (filename2)
os.system(command)
command = '''sed 's/ /\t/g' admixturedata.proportions > adm.prop'''
os.system(command)
command = '/lgc/programs/reap/REAP/REAP -g %s.tped -p %s.tfam -a adm.prop -f %s.3.P -r 1 -k 3 -t -100' % (filename2, filename2,filename2)
os.system(command)
command = 'cat REAP_pairs_relatedness.txt | sort -r -n -k 9 > REAP_pairs_relatedness.ordered.txt'
os.system(command)
# command = "./REAP -g %s.tped -p %s.tfam -a trio1_bra_phasing_result.txt -f allelefreqle -k 2 -t 0.025 -r 2 -m" % (filename, filename)
# os.system(command)
| [
"[email protected]"
] | |
517b30739b34bdf437be7b9f435c55989c8c782b | 35fe9e62ab96038705c3bd09147f17ca1225a84e | /a10_ansible/library/a10_scaleout_cluster_service_config_template.py | 61e0116126c6db774e3bae91357a58a630fe31d9 | [] | no_license | bmeidell/a10-ansible | 6f55fb4bcc6ab683ebe1aabf5d0d1080bf848668 | 25fdde8d83946dadf1d5b9cebd28bc49b75be94d | refs/heads/master | 2020-03-19T08:40:57.863038 | 2018-03-27T18:25:40 | 2018-03-27T18:25:40 | 136,226,910 | 0 | 0 | null | 2018-06-05T19:45:36 | 2018-06-05T19:45:36 | null | UTF-8 | Python | false | false | 5,958 | py | #!/usr/bin/python
REQUIRED_NOT_SET = (False, "One of ({}) must be set.")
REQUIRED_MUTEX = (False, "Only one of ({}) can be set.")
REQUIRED_VALID = (True, "")
DOCUMENTATION = """
module: a10_template
description:
    - Manage the scaleout cluster service-config template object.
author: A10 Networks 2018
version_added: 1.8
options:
    cluster-id:
        description:
            - Scaleout cluster id; URL path parameter (assumed required by the endpoint)
name:
description:
- Scaleout template Name
bucket-count:
description:
- Number of traffic buckets
device-group:
description:
- Device group id
uuid:
description:
- uuid of the object
user-tag:
description:
- Customized tag
"""
EXAMPLES = """
"""
ANSIBLE_METADATA = """
"""
# Hacky way of having access to object properties for evaluation
AVAILABLE_PROPERTIES = {"bucket_count","device_group","name","user_tag","uuid",}
# our imports go at the top so we fail fast.
from a10_ansible.axapi_http import client_factory
from a10_ansible import errors as a10_ex
def get_default_argspec():
return dict(
a10_host=dict(type='str', required=True),
a10_username=dict(type='str', required=True),
a10_password=dict(type='str', required=True, no_log=True),
state=dict(type='str', default="present", choices=["present", "absent"])
)
def get_argspec():
rv = get_default_argspec()
    rv.update(dict(
        # cluster-id is a URL path parameter consumed by new_url()/existing_url();
        # without it, the URL format() calls below raise KeyError.
        cluster_id=dict(
            type='str', required=True
        ),
bucket_count=dict(
type='str'
),
device_group=dict(
type='str'
),
name=dict(
            type='str', required=True
),
user_tag=dict(
type='str'
),
uuid=dict(
type='str'
),
))
return rv
def new_url(module):
"""Return the URL for creating a resource"""
# To create the URL, we need to take the format string and return it with no params
url_base = "/axapi/v3/scaleout/cluster/{cluster-id}/service-config/template/{name}"
    f_dict = {}
    f_dict["cluster-id"] = module.params["cluster_id"]
    f_dict["name"] = ""
return url_base.format(**f_dict)
def existing_url(module):
"""Return the URL for an existing resource"""
# Build the format dictionary
url_base = "/axapi/v3/scaleout/cluster/{cluster-id}/service-config/template/{name}"
    f_dict = {}
    f_dict["cluster-id"] = module.params["cluster_id"]
    f_dict["name"] = module.params["name"]
return url_base.format(**f_dict)
def build_envelope(title, data):
return {
title: data
}
def build_json(title, module):
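    # Collect the module params named in AVAILABLE_PROPERTIES, convert their
    # snake_case keys back to AXAPI's hyphenated field names, and wrap the
    # result under the given title key.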
rv = {}
for x in AVAILABLE_PROPERTIES:
v = module.params.get(x)
if v:
rx = x.replace("_", "-")
rv[rx] = module.params[x]
return build_envelope(title, rv)
def validate(params):
# Ensure that params contains all the keys.
requires_one_of = sorted([])
present_keys = sorted([x for x in requires_one_of if params.get(x)])
errors = []
marg = []
    if not len(requires_one_of):
        return REQUIRED_VALID[0], []  # keep the (bool, list) return shape used below
if len(present_keys) == 0:
rc,msg = REQUIRED_NOT_SET
marg = requires_one_of
elif requires_one_of == present_keys:
rc,msg = REQUIRED_MUTEX
marg = present_keys
else:
rc,msg = REQUIRED_VALID
if not rc:
errors.append(msg.format(", ".join(marg)))
return rc,errors
def exists(module):
try:
module.client.get(existing_url(module))
return True
except a10_ex.NotFound:
return False
def create(module, result):
payload = build_json("template", module)
try:
post_result = module.client.post(new_url(module), payload)
result.update(**post_result)
result["changed"] = True
except a10_ex.Exists:
result["changed"] = False
except a10_ex.ACOSException as ex:
module.fail_json(msg=ex.msg, **result)
except Exception as gex:
raise gex
return result
def delete(module, result):
try:
module.client.delete(existing_url(module))
result["changed"] = True
except a10_ex.NotFound:
result["changed"] = False
except a10_ex.ACOSException as ex:
module.fail_json(msg=ex.msg, **result)
except Exception as gex:
raise gex
return result
def update(module, result):
payload = build_json("template", module)
try:
post_result = module.client.put(existing_url(module), payload)
result.update(**post_result)
result["changed"] = True
except a10_ex.ACOSException as ex:
module.fail_json(msg=ex.msg, **result)
except Exception as gex:
raise gex
return result
def present(module, result):
if not exists(module):
return create(module, result)
else:
return update(module, result)
def absent(module, result):
return delete(module, result)
def run_command(module):
run_errors = []
result = dict(
changed=False,
original_message="",
message=""
)
state = module.params["state"]
a10_host = module.params["a10_host"]
a10_username = module.params["a10_username"]
a10_password = module.params["a10_password"]
# TODO(remove hardcoded port #)
a10_port = 443
a10_protocol = "https"
valid, validation_errors = validate(module.params)
    # Under Python 3 map() is lazy, so map(run_errors.append, ...) would never
    # actually append; extend the list directly instead.
    run_errors.extend(validation_errors)
if not valid:
result["messages"] = "Validation failure"
err_msg = "\n".join(run_errors)
module.fail_json(msg=err_msg, **result)
module.client = client_factory(a10_host, a10_port, a10_protocol, a10_username, a10_password)
if state == 'present':
result = present(module, result)
elif state == 'absent':
result = absent(module, result)
return result
def main():
module = AnsibleModule(argument_spec=get_argspec())
result = run_command(module)
module.exit_json(**result)
# standard ansible module imports
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
783a054e5bea735eb04b33c1d0b8064fa8afc87f | c65fb9bb01125789ccbbbb2252c3d72a68f32846 | /Addition/PythonPlotter/DracoBip/plot_config.py | 0e81ddbcb7e36db7e32ee4a592f9846fd7017ec8 | [
"MIT"
] | permissive | PandoraThanator/PnC | 64cf17cfe92ce41bcc790db139bda190e1fe18a3 | b123747671a307822d94400730233722f21a9328 | refs/heads/master | 2023-05-03T21:29:25.318698 | 2021-05-25T06:22:01 | 2021-05-25T06:22:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,299 | py | import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import os
## -----------------------------------------------------------------------------
## Read Data
## -----------------------------------------------------------------------------
file_path = os.getcwd() + "/../../../ExperimentDataCheck/"
t = np.genfromtxt(file_path+'running_time.txt', delimiter='\n', dtype=(float))
st_idx = 5
end_idx = len(t) - 10
t = t[st_idx:end_idx]
config = np.genfromtxt(file_path+'config.txt', delimiter=None, dtype=(float))[st_idx:end_idx]
qdot = np.genfromtxt(file_path+'qdot.txt', delimiter=None, dtype=(float))[st_idx:end_idx]
data_phse = np.genfromtxt(file_path+'phase.txt', delimiter=None, dtype=(float))[st_idx:end_idx]
phseChange = []
for i in range(0,len(t)-1):
if data_phse[i] != data_phse[i+1]:
phseChange.append(i)
else:
pass
## -----------------------------------------------------------------------------
## Plot Cmd
## -----------------------------------------------------------------------------
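# plot_phase() overlays a vertical line (plus the new phase id) at every sample
# where the logged state-machine phase changes, so joint plots read per phase.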
def plot_phase(ax):
for j in phseChange:
ax.axvline(x=t[j],color='indigo',linestyle='-')
ax.text(t[j],ax.get_ylim()[1],'%d'%(data_phse[j]),color='indigo')
fig, axes = plt.subplots(6, 2)
for i in range(6):
axes[i,0].plot(t, config[:,i], color='k', linewidth=3)
axes[i,0].grid(True)
plot_phase(axes[i,0])
axes[i,1].plot(t, qdot[:,i], color='k', linewidth=3)
axes[i,1].grid(True)
plot_phase(axes[i,1])
axes[0,0].set_title("q0 ~ q5")
axes[0,1].set_title("qdot0 ~ qdot5")
fig, axes = plt.subplots(5, 2)
for i in range(5):
axes[i,0].plot(t, config[:,i+6], color='k', linewidth=3)
axes[i,0].grid(True)
plot_phase(axes[i,0])
axes[i,1].plot(t, config[:,i+11], color='k', linewidth=3)
axes[i,1].grid(True)
plot_phase(axes[i,1])
axes[0,0].set_title("q6 ~ q10")
axes[0,1].set_title("q11 ~ q15")
fig, axes = plt.subplots(5, 2)
for i in range(5):
axes[i,0].plot(t, qdot[:,i+6], color='k', linewidth=3)
axes[i,0].grid(True)
plot_phase(axes[i,0])
axes[i,1].plot(t, qdot[:,i+11], color='k', linewidth=3)
axes[i,1].grid(True)
plot_phase(axes[i,1])
axes[0,0].set_title("qdot6 ~ qdot10")
axes[0,1].set_title("qdot11 ~ qdot15")
plt.show()
| [
"[email protected]"
] | |
d0f0a9fb82a86f4b101d297ef9d21ca60a54a2d0 | 0e8d49afd0e35510d8fa6901cf216896604240d8 | /lib/pyfrc/mains/cli_coverage.py | d5026f5eb96a193efd1f13233ac13c79759a6f2a | [
"MIT"
] | permissive | ThunderDogs5613/pyfrc | 3878a3d887d7adcb957128333ee71fc874c56f2b | d8e76a9284690f71ea7fab7d2aa9022cb6eec27d | refs/heads/master | 2021-08-29T14:21:13.124227 | 2017-12-04T05:46:40 | 2017-12-04T05:46:40 | 114,410,477 | 1 | 0 | null | 2017-12-15T20:55:31 | 2017-12-15T20:55:30 | null | UTF-8 | Python | false | false | 1,637 | py |
import argparse
import inspect
from os.path import dirname
import subprocess
import sys
class PyFrcCoverage:
"""
Wraps other commands by running them via the coverage module. Requires
the coverage module to be installed.
"""
def __init__(self, parser):
parser.add_argument('args', nargs=argparse.REMAINDER,
help='Arguments to pass to robot.py')
def run(self, options, robot_class, **static_options):
try:
import coverage
except ImportError:
print("Error importing coverage module for code coverage testing, did you install it?\n" +
"You can download it at https://pypi.python.org/pypi/coverage\n", file=sys.stderr)
return 1
if len(options.args) == 0:
print("ERROR: Coverage command requires arguments to run other commands")
return 1
file_location = inspect.getfile(robot_class)
option_args = list(options.args)
if option_args[0] == 'test':
option_args.insert(1, '--coverage-mode')
# construct the arguments to run coverage
args = [sys.executable, '-m', 'coverage',
'run', '--source', dirname(file_location),
                file_location] + option_args  # forward option_args so the '--coverage-mode' flag inserted above is used
retval = subprocess.call(args)
if retval != 0:
return retval
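        # second pass: print a line-by-line coverage report for the run above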
args = [sys.executable, '-m', 'coverage',
'report', '-m']
return subprocess.call(args)
| [
"[email protected]"
] |