code | docs
---|---|
def MultimodeCombine(pupils):
    fluxes = [np.vdot(p, p).real for p in pupils]
    coherentFluxes = [np.vdot(pupils[i], pupils[j])
                      for i in range(1, len(pupils))
                      for j in range(i)]
    return fluxes, coherentFluxes | Return the instantaneous coherent fluxes and photometric fluxes for a
multiway multimode combiner (no spatial filtering) |
def to_unicode(s):
if not isinstance(s, TEXT):
if not isinstance(s, bytes):
raise TypeError('You are required to pass either unicode or '
'bytes here, not: %r (%s)' % (type(s), s))
try:
s = s.decode('utf-8')
except UnicodeDecodeError as le:
raise TypeError('You are required to pass either a unicode '
'object or a utf-8-encoded bytes string here. '
'You passed a bytes object which contained '
'non-utf-8: %r. The UnicodeDecodeError that '
'resulted from attempting to interpret it as '
'utf-8 was: %s'
% (s, le,))
return s | Convert to unicode, raise exception with instructive error
message if s is not unicode, ascii, or utf-8. |
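A short usage sketch (assuming TEXT is bound to str on Python 3, which is what the isinstance checks imply):

TEXT = str  # assumption: the module aliases its text type roughly like this

assert to_unicode('caf\xe9') == 'caf\xe9'                   # unicode passes through
assert to_unicode('caf\xe9'.encode('utf-8')) == 'caf\xe9'   # utf-8 bytes are decoded
try:
    to_unicode(b'\xff\xfe')   # not valid utf-8, so this raises
except TypeError as e:
    print('rejected:', e)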
def to_postdata(self):
items = []
for k, v in sorted(self.items()): # predictable for testing
items.append((k.encode('utf-8'), to_utf8_optional_iterator(v)))
# tell urlencode to deal with sequence values and map them correctly
# to resulting querystring. for example self["k"] = ["v1", "v2"] will
# result in 'k=v1&k=v2' and not k=%5B%27v1%27%2C+%27v2%27%5D
return urlencode(items, True).replace('+', '%20').encode('ascii') | Serialize as post data for a POST request. |
def to_url(self):
base_url = urlparse(self.url)
query = parse_qs(base_url.query)
for k, v in self.items():
query.setdefault(k, []).append(to_utf8_optional_iterator(v))
scheme = base_url.scheme
netloc = base_url.netloc
path = base_url.path
params = base_url.params
fragment = base_url.fragment
url = (scheme, netloc, path, params, urlencode(query, True), fragment)
return urlunparse(url) | Serialize as a URL for a GET request. |
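The querystring mechanics this relies on, shown in isolation; a minimal sketch using the same stdlib helpers (the URL is made up):

from urllib.parse import urlparse, parse_qs, urlencode, urlunparse

base = urlparse('https://api.example.com/resource?a=1')
query = parse_qs(base.query)              # {'a': ['1']}
query.setdefault('b', []).append('2')     # append a list-valued parameter
print(urlunparse(base._replace(query=urlencode(query, True))))
# -> https://api.example.com/resource?a=1&b=2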
def _split_header(header):
params = {}
parts = header.split(',')
for param in parts:
# Ignore realm parameter.
if param.lower().startswith('realm='):
continue
# Remove whitespace.
param = param.strip()
# Split key-value.
param_parts = param.split('=', 1)
# Remove quotes and unescape the value.
params[param_parts[0]] = unquote(param_parts[1].strip('\"'))
return params | Turn Authorization: header into parameters. |
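For illustration, the kind of header value this parses (parameter values are made up); the caller is expected to strip the leading "OAuth " scheme first:

header = 'realm="Example", oauth_consumer_key="key123", oauth_token="tok456"'
params = _split_header(header)
print(params)   # {'oauth_consumer_key': 'key123', 'oauth_token': 'tok456'}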
def fetch_request_token(self, oauth_request):
try:
# Get the request token for authorization.
token = self._get_token(oauth_request, 'request')
except Error:
# No token required for the initial token request.
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
try:
callback = self.get_callback(oauth_request)
except Error:
callback = None # 1.0, no callback specified.
self._check_signature(oauth_request, consumer, None)
# Fetch a new token.
token = self.data_store.fetch_request_token(consumer, callback)
return token | Processes a request_token request and returns the
request token on success. |
def fetch_access_token(self, oauth_request):
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
try:
verifier = self._get_verifier(oauth_request)
except Error:
verifier = None
# Get the request token.
token = self._get_token(oauth_request, 'request')
self._check_signature(oauth_request, consumer, token)
new_token = self.data_store.fetch_access_token(consumer, token, verifier)
return new_token | Processes an access_token request and returns the
access token on success. |
def _get_token(self, oauth_request, token_type='access'):
token_field = oauth_request.get_parameter('oauth_token')
token = self.data_store.lookup_token(token_type, token_field)
if not token:
raise OAuthError('Invalid %s token: %s' % (token_type, token_field))
return token | Try to find the token for the provided request token key. |
def clean_chars(value):
    "Hack to remove non-ASCII data. Should convert to Unicode: code page 437?"
    value = value.replace('\xb9', ' ')
    value = value.replace('\xf8', ' ')
    value = value.replace('\xab', ' ')
    value = value.replace('\xa7', ' ')
    value = value.replace('\xa8', ' ')
    value = value.replace('\xfb', ' ')
    value = value.replace('\xfc', ' ')
    return value | Hack to remove non-ASCII data. Should convert to Unicode: code page 437? |
def vals(self, x, *args, **kwargs):
x = np.atleast_1d(x)
return self._vals(x, *args, **kwargs) | [Docstring] |
def fit_lsq(self, x, y_obs, params_start=None):
    # Set up variables
    x = np.atleast_1d(x)
    y_obs = np.atleast_1d(y_obs)
    if params_start is None:
        params_start = np.ones(self.n_parameters)
    # Error checking
    if len(x) != len(y_obs):
        raise ValueError("x and y_obs must be the same length")
    if len(params_start) != self.n_parameters:
        raise ValueError("Incorrect number of values in params_start")
    # Calculate fit
    def residuals(params, x, y_obs):
        y_pred = self.vals(x, *params)
        return y_obs - y_pred
    params_fit, _, _, msg, ier = optimize.leastsq(residuals, params_start,
                                                  args=(x, y_obs), full_output=True)
    # Check for convergence
    if ier > 4:
        raise ValueError("Least squares fit did not converge with "
                         "message %s" % msg)
    return tuple(params_fit) | Fit curve by method of least squares.
Parameters
----------
x : iterable
Independent variable
y_obs : iterable
Dependent variable (values observed at x)
params_start : iterable
Optional start values for all parameters. Default 1.
Returns
-------
tuple
Best fit values of parameters
Notes
-----
If least squares fit does not converge, ValueError is raised with
convergence message. |
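The same least-squares pattern in standalone form, with a hypothetical power-law model (data values are made up; scipy is assumed available):

import numpy as np
from scipy import optimize

def residuals(params, x, y_obs):
    c, z = params
    return y_obs - c * x ** z   # hypothetical model: y = c * x**z

x = np.array([1.0, 2.0, 4.0, 8.0])
y_obs = np.array([2.1, 3.9, 8.2, 15.8])
params_fit, _, _, msg, ier = optimize.leastsq(
    residuals, np.ones(2), args=(x, y_obs), full_output=True)
print(params_fit)   # roughly [2, 1] for this data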
def fit_lsq(self, df):
    tdf = df.set_index('div')
    return tdf.loc['1,1', 'n_spp'], tdf.loc['1,1', 'n_individs'] | Parameterize generic SAR curve from empirical data set
Parameters
----------
df : DataFrame
Result data frame from empirical SAR analysis
Notes
-----
Simply returns S0 and N0 from empirical SAR output, which are two fixed
parameters of METE SAR and EAR. This simply returns n_spp and
n_individs from the 1,1 division in
the dataframe. An error will be thrown if this division is not present
The ``fit_lsq`` is retained for consistency with other curves. |
def draw(self):
self.update_all()
self.vertex_list.draw(self.gl)
    pyglet.gl.glLoadIdentity() | Draw the shape in the window with the draw method. |
def update_all(self):
self.update_points()
self.update_vertex_list()
self.update_anchor()
pyglet.gl.glLoadIdentity() # reset gl
pyglet.gl.glLineWidth(self.line_width)
pyglet.gl.glPointSize(self.point_size)
self.transform.update_gl()
    # handle shape click events
    all_shapes.discard(self)
    if self._press is not None:
        all_shapes.add(self) | Before drawing, compute the transformation and set the OpenGL attributes needed to render the transformed shape. |
def update_vertex_list(self):
color = color_to_tuple(self.color, self.opacity)
length = len(self.points) // 2
self.vertex_list = pyglet.graphics.vertex_list(
length,
('v2f', self.points),
        ('c4B', color * length)) | Convert to attributes pyglet recognizes before drawing primitives with pyglet. |
def update_anchor(self):
t = self.transform
self.update_collision_rect()
if t.anchor_x_r and t.anchor_y_r:
t.anchor_x = self.min_x + (self.max_x - self.min_x) * t.anchor_x_r
        t.anchor_y = self.min_y + (self.max_y - self.min_y) * t.anchor_y_r | If the anchor was set with set_anchor_rate, its position must be recomputed continuously. |
def bulk_send(self, topic, kmsgs):
try:
self.client.do_request(
method="POST", path="/topic/{}".format(topic), data=[
dict(Value=k.MARSHMALLOW_SCHEMA.dump(k)) for k in kmsgs
]
)
return Result(stdout="{} message(s) sent".format(len(kmsgs)))
except Exception as exc:
return Result.from_exception(exc) | Send a batch of messages
:param str topic: a kafka topic
:param ksr.transport.Message kmsgs: Messages to serialize
:return: Execution result
:rtype: kser.result.Result |
def send(self, topic, kmsg):
try:
self.client.do_request(
method="POST", params=dict(format="raw"),
path="/topic/{}".format(topic),
data=kmsg.MARSHMALLOW_SCHEMA.dump(kmsg)
)
result = Result(
uuid=kmsg.uuid, stdout="Message sent: {} ({})".format(
kmsg.uuid, kmsg.entrypoint
)
)
except Exception as exc:
result = Result.from_exception(exc, kmsg.uuid)
finally:
# noinspection PyUnboundLocalVariable
if result.retcode < 300:
return self._onsuccess(kmsg=kmsg, result=result)
else:
return self._onerror(kmsg=kmsg, result=result) | Send the message into the given topic
:param str topic: a kafka topic
:param ksr.transport.Message kmsg: Message to serialize
:return: Execution result
:rtype: kser.result.Result |
def batch_update(self, command, rows):
request = {
"database": {
"alias": self.__options['dbAlias']
},
"batchUpdate": {
"command": command,
"rows": rows,
"shardKey": self.__options.get('shardKey'),
}
}
dr = self.__app.native_api_call('db', 'batch-update', request, self.__options, False)
    return json.loads(dr.text) | For bulk inserts of moderate volume, 1-5k rows per call.
:param command: SQL INSERT or UPDATE
:param rows: list of dict
:return: dict |
def update(self, command, params=None):
request = {
"database": {
"alias": self.__options['dbAlias']
},
"dbQuery": {
"command": command,
"parameters": params,
"shardKey": self.__options.get('shardKey'),
}
}
dr = self.__app.native_api_call('db', 'update', request, self.__options, False)
    return json.loads(dr.text) | INSERT, UPDATE, DELETE and other statements that return no result set must run through this method.
The exception is such statements with RETURNING on PostgreSQL.
:param command: SQL query
:param params: Parameters for prepared statements
:rtype: object DataResult |
def one(self, command, params=None):
dr = self.query(command, params)
if dr['rows']:
return dr['rows'][0]
else:
        return None | Return the first row of the result obtained via query.
> db.query('SELECT * FROM users WHERE id=:id', {"id": MY_USER_ID})
:param command: SQL query
:param params: Parameters for prepared statements
:rtype: dict |
def all(self, command, params=None):
dr = self.query(command, params)
    return dr['rows'] | Return the rows of the result obtained via query.
> db.query('SELECT * FROM users WHERE id=:id', {"id": MY_USER_ID})
:param command: SQL query
:param params: Parameters for prepared statements
:rtype: list of dict |
def after_insert(mapper, connection, target):
record_after_update.send(CmtRECORDCOMMENT, recid=target.id_bibrec)
from .api import get_reply_order_cache_data
if target.in_reply_to_id_cmtRECORDCOMMENT > 0:
parent = CmtRECORDCOMMENT.query.get(
target.in_reply_to_id_cmtRECORDCOMMENT)
if parent:
trans = connection.begin()
parent_reply_order = parent.reply_order_cached_data \
if parent.reply_order_cached_data else ''
parent_reply_order += get_reply_order_cache_data(target.id)
connection.execute(
db.update(CmtRECORDCOMMENT.__table__).
where(CmtRECORDCOMMENT.id == parent.id).
values(reply_order_cached_data=parent_reply_order))
trans.commit() | Update reply order cache and send record-after-update signal. |
def is_collapsed(self, id_user):
return CmtCOLLAPSED.query.filter(db.and_(
CmtCOLLAPSED.id_bibrec == self.id_bibrec,
CmtCOLLAPSED.id_cmtRECORDCOMMENT == self.id,
CmtCOLLAPSED.id_user == id_user)).count() > 0 | Return true if the comment is collapsed by user. |
def collapse(self, id_user):
c = CmtCOLLAPSED(id_bibrec=self.id_bibrec, id_cmtRECORDCOMMENT=self.id,
id_user=id_user)
db.session.add(c)
    db.session.commit() | Collapse comment belonging to user. |
def expand(self, id_user):
CmtCOLLAPSED.query.filter(db.and_(
CmtCOLLAPSED.id_bibrec == self.id_bibrec,
CmtCOLLAPSED.id_cmtRECORDCOMMENT == self.id,
        CmtCOLLAPSED.id_user == id_user)).delete(synchronize_session=False) | Expand comment belonging to user. |
def count(cls, *criteria, **filters):
    return cls.query.filter(*criteria).filter_by(**filters).count() | Count comments matching the given criteria and filters. |
def get_version(version=None):
if version[4] > 0: # 0.2.1-alpha.1
return "%s.%s.%s-%s.%s" % (version[0], version[1], version[2], version[3], version[4])
elif version[3] != '': # 0.2.1-alpha
return "%s.%s.%s-%s" % (version[0], version[1], version[2], version[3])
elif version[2] > 0: # 0.2.1
return "%s.%s.%s" % (version[0], version[1], version[2])
else: # 0.2
return "%s.%s" % (version[0], version[1]) | Returns a tuple of the django version. If version argument is non-empty,
then checks for correctness of the tuple provided. |
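Worked examples, assuming the tuple layout the inline comments imply:

print(get_version((0, 2, 1, 'alpha', 1)))   # -> 0.2.1-alpha.1
print(get_version((0, 2, 1, 'alpha', 0)))   # -> 0.2.1-alpha
print(get_version((0, 2, 1, '', 0)))        # -> 0.2.1
print(get_version((0, 2, 0, '', 0)))        # -> 0.2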
def list_all_refund_operations(cls, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_refund_operations_with_http_info(**kwargs)
else:
(data) = cls._list_all_refund_operations_with_http_info(**kwargs)
return data | List RefundOperations
Return a list of RefundOperations
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_refund_operations(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[RefundOperation]
If the method is called asynchronously,
returns the request thread. |
def _push_packet(self, packet):
self._read_queue.append((decode(packet), packet))
if self._read_waiter is not None:
w, self._read_waiter = self._read_waiter, None
w.set_result(None) | Appends a packet to the internal read queue, or notifies
a waiting listener that a packet just came in. |
def _read_data(self):
while True:
try:
data = yield from self._socket.recv()
except asyncio.CancelledError:
break
except ConnectionClosed:
break
self._push_packet(data)
    self._loop.call_soon(self.close) | Reads data from the connection and passes it to _push_packet,
until the connection is closed or the task is cancelled. |
def wait_message(self):
if self._state != states['open']:
return False
if len(self._read_queue) > 0:
return True
assert self._read_waiter is None or self._read_waiter.cancelled(), \
"You may only use one wait_message() per connection."
self._read_waiter = asyncio.Future(loop=self._loop)
yield from self._read_waiter
    return (yield from self.wait_message()) | Waits until a connection is available on the wire, or until
the connection is in a state that it can't accept messages.
It returns True if a message is available, False otherwise. |
def get_reservation_ports(session, reservation_id, model_name='Generic Traffic Generator Port'):
reservation_ports = []
reservation = session.GetReservationDetails(reservation_id).ReservationDescription
for resource in reservation.Resources:
if resource.ResourceModelName == model_name:
reservation_ports.append(resource)
    return reservation_ports | Get all Generic Traffic Generator Ports in the reservation.
:return: list of all Generic Traffic Generator Port resource objects in reservation |
def get_reservation_resources(session, reservation_id, *models):
models_resources = []
reservation = session.GetReservationDetails(reservation_id).ReservationDescription
for resource in reservation.Resources:
if resource.ResourceModelName in models:
models_resources.append(resource)
return models_resources | Get all resources of given models in reservation.
:param session: CloudShell session
:type session: cloudshell.api.cloudshell_api.CloudShellAPISession
:param reservation_id: active reservation ID
:param models: list of requested models
:return: list of all resources of models in reservation |
def change_issue_status(self, issue_id, status_id: str):
self.__metadb.update("""
update meta.issue set
issue_status_id=:status_id,
assignee_user_id=valera_user_id(),
last_user_id=valera_user_id()
where id = :issue_id
""", {"issue_id": issue_id, "status_id": status_id}) | Смета статуса тикета
:param issue_id: int
:param status_id: int |
def get_current_container_id(read_from='/proc/self/cgroup'):
if not os.path.exists(read_from):
return
with open(read_from, 'r') as cgroup:
for line in cgroup:
if re.match('.*/[0-9a-f]{64}$', line.strip()):
return re.sub('.*/([0-9a-f]{64})$', '\\1', line.strip()) | Get the ID of the container the application is currently running in,
otherwise return `None` if not running in a container.
This is a best-effort guess, based on cgroups.
:param read_from: the cgroups file to read from (default: `/proc/self/cgroup`) |
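A hedged illustration of the cgroup line format the regex matches (the ID here is made up):

import re

line = '12:memory:/docker/' + 'ab' * 32   # path ending in 64 hex characters
assert re.match('.*/[0-9a-f]{64}$', line.strip())
print(re.sub('.*/([0-9a-f]{64})$', '\\1', line.strip())[:12])   # short container ID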
def read_configuration(key, path=None, default=None, single_config=False, fallback_to_env=True):
if path and os.path.exists(path):
with open(path, 'r') as config_file:
if single_config:
return config_file.read()
for line in config_file:
if line.startswith('%s=' % key):
return line.split('=', 1)[1].strip()
if fallback_to_env and key in os.environ:
return os.environ[key]
return default | Read configuration from a file, Docker config or secret or from the environment variables.
:param key: the configuration key
:param path: the path of the configuration file (regular file or Docker config or secret)
:param default: the default value when not found elsewhere (default: `None`)
:param single_config: treat the configuration file as containing the full configuration,
otherwise the file is expected to be a '=' separated key-value list line by line
(default: `False`)
:param fallback_to_env: look for the configuration key in the environment variables
if not found elsewhere (default: `True`) |
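A quick usage sketch (keys and values are made up; a temp file stands in for a Docker config or secret):

import os, tempfile

with tempfile.NamedTemporaryFile('w', suffix='.conf', delete=False) as f:
    f.write('DB_HOST=db.internal\nDB_PORT=5432\n')
    path = f.name

print(read_configuration('DB_PORT', path=path))                 # -> 5432
os.environ['API_KEY'] = 'from-env'
print(read_configuration('API_KEY', path=path))                 # falls back to env
print(read_configuration('MISSING', path=path, default='x'))    # -> x
os.remove(path)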
def CleanString(s):
    punc = (' ', '-', '\'', '.', '&amp;', '&', '+', '@')   # '&amp;' restored: HTML-unescaping had collapsed it to a duplicate '&'
pieces = []
for part in s.split():
part = part.strip()
for p in punc:
part = part.replace(p, '_')
part = part.strip('_')
part = part.lower()
pieces.append(part)
return '_'.join(pieces) | Cleans up string.
Doesn't catch everything, appears to sometimes allow double underscores
to occur as a result of replacements. |
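The double-underscore caveat is easy to reproduce; a quick sketch:

print(CleanString('Bob Smith'))        # -> bob_smith
print(CleanString('A & B Plumbing'))   # -> a__b_plumbing: the lone '&' collapses
                                       #    to an empty piece, leaving '__'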
def DedupVcardFilenames(vcard_dict):
remove_keys = []
add_pairs = []
for k, v in vcard_dict.items():
if not len(v) > 1:
continue
for idx, vcard in enumerate(v):
fname, ext = os.path.splitext(k)
fname = '{}-{}'.format(fname, idx + 1)
fname = fname + ext
assert fname not in vcard_dict
add_pairs.append((fname, vcard))
remove_keys.append(k)
for k, v in add_pairs:
vcard_dict[k].append(v)
for k in remove_keys:
vcard_dict.pop(k)
return vcard_dict | Make sure every vCard in the dictionary has a unique filename. |
def WriteVcard(filename, vcard, fopen=codecs.open):
if os.access(filename, os.F_OK):
logger.warning('File exists at "{}", skipping.'.format(filename))
return False
try:
with fopen(filename, 'w', encoding='utf-8') as f:
logger.debug('Writing {}:\n{}'.format(filename, u(vcard.serialize())))
f.write(u(vcard.serialize()))
except OSError:
logger.error('Error writing to file "{}", skipping.'.format(filename))
return False
return True | Writes a vCard into the given filename. |
def create_cash_on_delivery_payment(cls, cash_on_delivery_payment, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._create_cash_on_delivery_payment_with_http_info(cash_on_delivery_payment, **kwargs)
else:
(data) = cls._create_cash_on_delivery_payment_with_http_info(cash_on_delivery_payment, **kwargs)
return data | Create CashOnDeliveryPayment
Create a new CashOnDeliveryPayment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_cash_on_delivery_payment(cash_on_delivery_payment, async=True)
>>> result = thread.get()
:param async bool
:param CashOnDeliveryPayment cash_on_delivery_payment: Attributes of cashOnDeliveryPayment to create (required)
:return: CashOnDeliveryPayment
If the method is called asynchronously,
returns the request thread. |
def delete_cash_on_delivery_payment_by_id(cls, cash_on_delivery_payment_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_cash_on_delivery_payment_by_id_with_http_info(cash_on_delivery_payment_id, **kwargs)
else:
(data) = cls._delete_cash_on_delivery_payment_by_id_with_http_info(cash_on_delivery_payment_id, **kwargs)
return data | Delete CashOnDeliveryPayment
Delete an instance of CashOnDeliveryPayment by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_cash_on_delivery_payment_by_id(cash_on_delivery_payment_id, async=True)
>>> result = thread.get()
:param async bool
:param str cash_on_delivery_payment_id: ID of cashOnDeliveryPayment to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread. |
def get_cash_on_delivery_payment_by_id(cls, cash_on_delivery_payment_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_cash_on_delivery_payment_by_id_with_http_info(cash_on_delivery_payment_id, **kwargs)
else:
(data) = cls._get_cash_on_delivery_payment_by_id_with_http_info(cash_on_delivery_payment_id, **kwargs)
return data | Find CashOnDeliveryPayment
Return single instance of CashOnDeliveryPayment by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_cash_on_delivery_payment_by_id(cash_on_delivery_payment_id, async=True)
>>> result = thread.get()
:param async bool
:param str cash_on_delivery_payment_id: ID of cashOnDeliveryPayment to return (required)
:return: CashOnDeliveryPayment
If the method is called asynchronously,
returns the request thread. |
def list_all_cash_on_delivery_payments(cls, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_cash_on_delivery_payments_with_http_info(**kwargs)
else:
(data) = cls._list_all_cash_on_delivery_payments_with_http_info(**kwargs)
return data | List CashOnDeliveryPayments
Return a list of CashOnDeliveryPayments
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_cash_on_delivery_payments(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[CashOnDeliveryPayment]
If the method is called asynchronously,
returns the request thread. |
def replace_cash_on_delivery_payment_by_id(cls, cash_on_delivery_payment_id, cash_on_delivery_payment, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._replace_cash_on_delivery_payment_by_id_with_http_info(cash_on_delivery_payment_id, cash_on_delivery_payment, **kwargs)
else:
(data) = cls._replace_cash_on_delivery_payment_by_id_with_http_info(cash_on_delivery_payment_id, cash_on_delivery_payment, **kwargs)
return data | Replace CashOnDeliveryPayment
Replace all attributes of CashOnDeliveryPayment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_cash_on_delivery_payment_by_id(cash_on_delivery_payment_id, cash_on_delivery_payment, async=True)
>>> result = thread.get()
:param async bool
:param str cash_on_delivery_payment_id: ID of cashOnDeliveryPayment to replace (required)
:param CashOnDeliveryPayment cash_on_delivery_payment: Attributes of cashOnDeliveryPayment to replace (required)
:return: CashOnDeliveryPayment
If the method is called asynchronously,
returns the request thread. |
def update_cash_on_delivery_payment_by_id(cls, cash_on_delivery_payment_id, cash_on_delivery_payment, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._update_cash_on_delivery_payment_by_id_with_http_info(cash_on_delivery_payment_id, cash_on_delivery_payment, **kwargs)
else:
(data) = cls._update_cash_on_delivery_payment_by_id_with_http_info(cash_on_delivery_payment_id, cash_on_delivery_payment, **kwargs)
return data | Update CashOnDeliveryPayment
Update attributes of CashOnDeliveryPayment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_cash_on_delivery_payment_by_id(cash_on_delivery_payment_id, cash_on_delivery_payment, async=True)
>>> result = thread.get()
:param async bool
:param str cash_on_delivery_payment_id: ID of cashOnDeliveryPayment to update. (required)
:param CashOnDeliveryPayment cash_on_delivery_payment: Attributes of cashOnDeliveryPayment to update. (required)
:return: CashOnDeliveryPayment
If the method is called asynchronously,
returns the request thread. |
def dial(self, target):
    '''
    connects to a node
    :param url: string (optional) - resource in which to connect.
    if not provided, will use default for the stage
    :returns: provider, error
    '''
    if not target:
        return None, "target network must be specified with -t or --target"
    url = get_url(self.config, target)
    try:
        if url.startswith('ws'):
            self.w3 = Web3(WebsocketProvider(url))
        elif url.startswith('http'):
            self.w3 = Web3(HTTPProvider(url))
        elif url.endswith('ipc'):
            if url == 'ipc':
                url = None
            self.w3 = Web3(Web3.IPCProvider(url))
        else:
            return None, "Invalid Provider URL: {}".format(url)
    except Exception as e:
        return None, e
    return self.w3, None | connects to a node
:param url: string (optional) - resource in which to connect.
if not provided, will use default for the stage
:returns: provider, error |
async def listen(self, address, target):
    '''
    starts event listener for the contract
    :return:
    '''
    if not address:
        return None, "listening address not provided"
    EZO.log.info(bright("hello ezo::listening to address: {}".format(blue(address))))
    interval = self._ezo.config["poll-interval"]
    event_filter = self._ezo.w3.eth.filter({"address": address, "toBlock": "latest"})
    loop = asyncio.new_event_loop()
    try:
        while True:
            for event in event_filter.get_new_entries():
                if EZO.log:
                    EZO.log.debug(bright("event received: {}".format(event)))
                ContractEvent.handler(event, self, target)
            await asyncio.sleep(interval)
    except Exception as e:
        return None, e
    finally:
        loop.close() | starts event listener for the contract
:return: |
def send(ezo, name, method, data, target):
    '''
    runs a transaction on a contract method
    :param ezo: ezo instance
    :param name: name of the Contract
    :param method: name of the contract method
    :param data: formatted data to send to the contract method
    :return:
    '''
    # load the contract by name
    c, err = Contract.get(name, ezo)
    if err:
        return None, err
    address, err = Contract.get_address(name, c.hash, ezo.db, target)
    if err:
        return None, err
    d = dict()
    d["address"] = address
    d["function"] = method
    d["params"] = c.paramsForMethod(method, data)
    d["target"] = target
    resp, err = c.response(d)
    if err:
        return None, err
    return resp, None | runs a transaction on a contract method
:param ezo: ezo instance
:param name: name of the Contract
:param method: name of the contract method
:param data: formatted data to send to the contract method
:return: |
def get(name, ezo):
    '''
    get the latest compiled contract instance by contract name
    :param name:
    :param ezo:
    :return:
    '''
    key = DB.pkey([EZO.CONTRACT, name])
    cp, err = ezo.db.get(key)
    if err:
        return None, err
    if not cp:
        return None, None
    # create a new Contract
    c = Contract(cp["name"], ezo)
    c.abi = cp["abi"]
    c.bin = cp["bin"]
    c.hash = cp["hash"]
    c.source = cp["source"]
    c.timestamp = cp["timestamp"]
    c.te_map = cp['te-map']
    return c, None | get the latest compiled contract instance by contract name
:param name:
:param ezo:
:return: |
def create_from_hash(hash, ezo):
    '''
    given the hash of a contract, returns a contract from the data store
    :param hash: (string) hash of the contract source code
    :param ezo: ezo instance
    :return: contract instance, error
    '''
    cp, err = ezo.db.get("contracts", hash)
    if err:
        return None, err
    # create a new Contract
    c = Contract(cp["name"], ezo)
    c.abi = cp["abi"]
    c.bin = cp["bin"]
    c.hash = cp["hash"]
    c.source = cp["source"]
    c.timestamp = cp["timestamp"]
    c.te_map = cp['te-map']
    return c, None | given the hash of a contract, returns a contract from the data store
:param hash: (string) hash of the contract source code
:param ezo: ezo instance
:return: contract instance, error |
def load(filepath):
    '''
    loads a contract file
    :param filepath: (string) - contract filename
    :return: source, err
    '''
    try:
        with open(filepath, "r") as fh:
            source = fh.read()
    except Exception as e:
        return None, e
    return source, None | loads a contract file
:param filepath: (string) - contract filename
:return: source, err |
def compile(source, ezo):
    '''
    compiles the source code
    :param source: (string) - contract source code
    :param ezo: - ezo reference for Contract object creation
    :return: (list) compiled source
    '''
    try:
        compiled = compile_source(source)
        compiled_list = []
        for name in compiled:
            c = Contract(name, ezo)
            interface = compiled[name]
            c.abi = interface['abi']
            c.bin = interface['bin']
            compiled_list.append(c)
    except Exception as e:
        return None, e
    return compiled_list, None | compiles the source code
:param source: (string) - contract source code
:param ezo: - ezo reference for Contract object creation
:return: (list) compiled source |
def get_address(name, hash, db, target=None):
    '''
    fetches the contract address of deployment
    :param hash: the contract file hash
    :return: (string) address of the contract
    error, if any
    '''
    key = DB.pkey([EZO.DEPLOYED, name, target, hash])
    d, err = db.get(key)
    if err:
        return None, err
    if not d:
        return None, None
    return d['address'].lower(), None | fetches the contract address of deployment
:param hash: the contract file hash
:return: (string) address of the contract
error, if any |
def put(contract_name, abi):
    '''
    save the contract's ABI
    :param contract_name: string - name of the contract
    :param abi: the contract's abi JSON file
    :return: None, None if saved okay
    None, error if an error
    '''
    if not Catalog.path:
        return None, "path to catalog must be set before saving to it"
    if not contract_name:
        return None, "contract name must be provided before saving"
    if not abi:
        return None, "contract ABI missing"
    abi_file = "{}/{}.abi".format(Catalog.path, contract_name)
    try:
        with open(abi_file, "w+") as file:
            file.write(abi)
    except Exception as e:
        return None, "Catalog.put error: {}".format(e)
    return None, None | save the contract's ABI
:param contract_name: string - name of the contract
:param abi: the contract's abi JSON file
:return: None, None if saved okay
None, error if an error |
def get(contract_name):
    '''
    return the contract's ABI, marshaled into python dict
    :param contract_name: string - name of the contract to load
    :return: ABI, None - if successful
    None, error - if error
    '''
    if not Catalog.path:
        return None, "path to catalog must be set before searching it"
    if not contract_name:
        return None, "contract name missing"
    abi_file = "{}/{}.abi".format(Catalog.path, contract_name)
    try:
        with open(abi_file, "r") as file:
            abi = file.read()
    except Exception as e:
        return None, "Catalog.get error: {}".format(e)
    return abi, None | return the contract's ABI, marshaled into python dict
:param contract_name: string - name of the contract to load
:return: ABI, None - if successful
None, error - if error |
def open(self):
    '''
    attempts to open the database. if it gets a locked message, it will wait one second and try
    again. if it is still locked, it will return an error
    :return: None, None if successful
    None, error if error
    '''
    cycle = 2
    count = 0
    while True:
        try:
            DB.db = plyvel.DB(DB.dbpath, create_if_missing=True).prefixed_db(bytes(DB.project, 'utf-8'))
            if DB.db:
                break
        except Exception as e:
            # wait for other program to unlock the db
            count += 1
            time.sleep(1)
            if count >= cycle:
                return None, "DB error: {}".format(e)
    return None, None | attempts to open the database. if it gets a locked message, it will wait one second and try
again. if it is still locked, it will return an error
:return: None, None if successful
None, error if error |
def transform_from_chomsky_normal_form(root):
# type: (Nonterminal) -> Nonterminal
# Transforms leaves
items = Traversing.post_order(root)
items = filter(lambda x: isinstance(x, (ChomskyTermRule, ChomskyTerminalReplaceRule)), items)
de = deque(items)
while de:
rule = de.popleft()
if isinstance(rule, ChomskyTermRule):
upper_nonterm = rule.from_symbols[0] # type: Nonterminal
term = rule.to_symbols[0]
Manipulations.replaceNode(upper_nonterm, term)
elif isinstance(rule, ChomskyTerminalReplaceRule):
created_rule = rule.from_rule() # type: Rule
Manipulations.replaceRule(rule, created_rule)
de.append(created_rule)
# Transform inner nodes
items = Traversing.post_order(root)
items = filter(lambda x: isinstance(x, ChomskySplitRule), items)
de = deque(items)
while de:
rule = de.popleft()
if isinstance(rule, ChomskySplitRule):
created_rule = rule.from_rule() # type: Rule
# parent nonterminals
for p in rule.from_symbols: # type: Nonterminal
p._set_to_rule(created_rule)
created_rule._from_symbols.append(p)
# left child
left_child = rule.to_symbols[0] # type: Nonterminal
left_child._set_from_rule(created_rule)
created_rule._to_symbols.append(left_child)
            # right children
for ch in rule.to_symbols[1].to_rule.to_symbols: # type: Nonterminal
ch._set_from_rule(created_rule)
created_rule.to_symbols.append(ch)
# add back if the rules is ChomskySplitRule again
de.appendleft(created_rule)
return root | Transform the tree created by grammar in the Chomsky Normal Form to original rules.
:param root: Root of parsed tree.
:return: Modified tree. |
def create_return_operation(cls, return_operation, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._create_return_operation_with_http_info(return_operation, **kwargs)
else:
(data) = cls._create_return_operation_with_http_info(return_operation, **kwargs)
return data | Create ReturnOperation
Create a new ReturnOperation
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_return_operation(return_operation, async=True)
>>> result = thread.get()
:param async bool
:param ReturnOperation return_operation: Attributes of returnOperation to create (required)
:return: ReturnOperation
If the method is called asynchronously,
returns the request thread. |
def delete_return_operation_by_id(cls, return_operation_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_return_operation_by_id_with_http_info(return_operation_id, **kwargs)
else:
(data) = cls._delete_return_operation_by_id_with_http_info(return_operation_id, **kwargs)
return data | Delete ReturnOperation
Delete an instance of ReturnOperation by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_return_operation_by_id(return_operation_id, async=True)
>>> result = thread.get()
:param async bool
:param str return_operation_id: ID of returnOperation to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread. |
def get_return_operation_by_id(cls, return_operation_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_return_operation_by_id_with_http_info(return_operation_id, **kwargs)
else:
(data) = cls._get_return_operation_by_id_with_http_info(return_operation_id, **kwargs)
return data | Find ReturnOperation
Return single instance of ReturnOperation by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_return_operation_by_id(return_operation_id, async=True)
>>> result = thread.get()
:param async bool
:param str return_operation_id: ID of returnOperation to return (required)
:return: ReturnOperation
If the method is called asynchronously,
returns the request thread. |
def list_all_return_operations(cls, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_return_operations_with_http_info(**kwargs)
else:
(data) = cls._list_all_return_operations_with_http_info(**kwargs)
return data | List ReturnOperations
Return a list of ReturnOperations
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_return_operations(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[ReturnOperation]
If the method is called asynchronously,
returns the request thread. |
def replace_return_operation_by_id(cls, return_operation_id, return_operation, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._replace_return_operation_by_id_with_http_info(return_operation_id, return_operation, **kwargs)
else:
(data) = cls._replace_return_operation_by_id_with_http_info(return_operation_id, return_operation, **kwargs)
return data | Replace ReturnOperation
Replace all attributes of ReturnOperation
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_return_operation_by_id(return_operation_id, return_operation, async=True)
>>> result = thread.get()
:param async bool
:param str return_operation_id: ID of returnOperation to replace (required)
:param ReturnOperation return_operation: Attributes of returnOperation to replace (required)
:return: ReturnOperation
If the method is called asynchronously,
returns the request thread. |
def update_return_operation_by_id(cls, return_operation_id, return_operation, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._update_return_operation_by_id_with_http_info(return_operation_id, return_operation, **kwargs)
else:
(data) = cls._update_return_operation_by_id_with_http_info(return_operation_id, return_operation, **kwargs)
return data | Update ReturnOperation
Update attributes of ReturnOperation
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_return_operation_by_id(return_operation_id, return_operation, async=True)
>>> result = thread.get()
:param async bool
:param str return_operation_id: ID of returnOperation to update. (required)
:param ReturnOperation return_operation: Attributes of returnOperation to update. (required)
:return: ReturnOperation
If the method is called asynchronously,
returns the request thread. |
def console_logger(
level="WARNING"
):
################ > IMPORTS ################
## STANDARD LIB ##
import logging
import logging.config
## THIRD PARTY ##
import yaml
try:
yaml.warnings({'YAMLLoadWarning': False})
    except Exception:
pass
## LOCAL APPLICATION ##
# SETUP LOGGING
loggerConfig = """
version: 1
formatters:
console_style:
format: '* %(asctime)s - %(levelname)s: %(pathname)s:%(funcName)s:%(lineno)d > %(message)s'
datefmt: '%H:%M:%S'
handlers:
console:
class: logging.StreamHandler
level: """ + level + """
formatter: console_style
stream: ext://sys.stdout
root:
level: """ + level + """
handlers: [console]"""
    logging.config.dictConfig(yaml.safe_load(loggerConfig))
logger = logging.getLogger(__name__)
return logger | *Setup and return a console logger*
**Key Arguments:**
- ``level`` -- the level of logging required
**Return:**
- ``logger`` -- the console logger
**Usage:**
.. code-block:: python
from fundamentals import logs
log = logs.console_logger(
level="DEBUG"
)
log.debug("Testing console logger") |
def doRollover(self):
# Rotate the file first.
handlers.RotatingFileHandler.doRollover(self)
# Add group write to the current permissions.
currMode = os.stat(self.baseFilename).st_mode
os.chmod(self.baseFilename, currMode | stat.S_IWGRP |
stat.S_IRGRP | stat.S_IWOTH | stat.S_IROTH) | *Override base class method to make the new log file group writable.* |
def get_creators(self, attribute='creatorName'):
if 'creators' in self.xml:
if isinstance(self.xml['creators']['creator'], list):
return [c[attribute] for c in self.xml['creators']['creator']]
else:
return self.xml['creators']['creator'][attribute]
return None | Get DataCite creators. |
def get_dates(self):
if 'dates' in self.xml:
if isinstance(self.xml['dates']['date'], dict):
            return list(self.xml['dates']['date'].values())[0]
return self.xml['dates']['date']
return None | Get DataCite dates. |
def get_description(self, description_type='Abstract'):
if 'descriptions' in self.xml:
if isinstance(self.xml['descriptions']['description'], list):
for description in self.xml['descriptions']['description']:
if description_type in description:
return description[description_type]
elif isinstance(self.xml['descriptions']['description'], dict):
description = self.xml['descriptions']['description']
if description_type in description:
return description[description_type]
elif len(description) == 1:
# return the only description
                return list(description.values())[0]
return None | Get DataCite description. |
from collections import deque
# aliased import assumed, so the final call is not shadowed by this function's own name
from operator import itemgetter as op_itemgetter

def itemgetter(iterable, indexes):
    ''' same functionality as operator.itemgetter except, this one supports
    both positive and negative indexing of generators as well '''
    indexes = indexes if isinstance(indexes, tuple) else tuple(indexes)
    assert all(isinstance(i, int) for i in indexes), 'indexes needs to be a tuple of ints'
    positive_indexes = [i for i in indexes if i >= 0]
    negative_indexes = [i for i in indexes if i < 0]
    out = {}
    if len(negative_indexes):
        # if there are any negative indexes
        negative_index_buffer = deque(maxlen=min(indexes) * -1)
        for i, x in enumerate(iterable):
            if i in positive_indexes:
                out[i] = x
            negative_index_buffer.append(x)
        out.update({ni: negative_index_buffer[ni] for ni in negative_indexes})
    else:
        # if just positive results
        out.update({i: x for i, x in enumerate(iterable) if i in positive_indexes})
    return op_itemgetter(*indexes)(out) | same functionality as operator.itemgetter except, this one supports
both positive and negative indexing of generators as well |
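A usage sketch; negative indexes work even on a generator, which operator.itemgetter alone cannot handle:

gen = (i * i for i in range(10))
print(itemgetter(gen, (0, 2, -1)))   # -> (0, 4, 81)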
def refresh(self):
    try:
        # support for RPiDisplay SSD1306 driver
        self.Display.setImage(self._catchCurrentViewContent())
    except Exception:
        try:
            # support for Adafruit SSD1306 driver
            self.Display.image(self._catchCurrentViewContent())
        except Exception:
            raise RuntimeError("Cannot update image to buffer.")
    self.Display.display() | Update the current view content on the display.
Supported drivers: JMRPiDisplay_SSD1306 and Adafruit SSD1306. |
def google_register(username:str, email:str, full_name:str, google_id:int, bio:str, token:str=None):
auth_data_model = apps.get_model("users", "AuthData")
user_model = apps.get_model("users", "User")
try:
# Google user association exist?
auth_data = auth_data_model.objects.get(key="google", value=google_id)
user = auth_data.user
except auth_data_model.DoesNotExist:
try:
# Is a user with the same email as the google user?
user = user_model.objects.get(email=email)
auth_data_model.objects.create(user=user, key="google", value=google_id, extra={})
except user_model.DoesNotExist:
# Create a new user
username_unique = slugify_uniquely(username, user_model, slugfield="username")
user = user_model.objects.create(email=email,
username=username_unique,
full_name=full_name,
bio=bio)
auth_data_model.objects.create(user=user, key="google", value=google_id, extra={})
send_register_email(user)
user_registered_signal.send(sender=user.__class__, user=user)
if token:
membership = get_membership_by_token(token)
membership.user = user
membership.save(update_fields=["user"])
return user | Register a new user from google.
This can raise `exc.IntegrityError` exceptions in
case of conflics found.
:returns: User |
def crystalfield(interaction=np.linspace(0, 20, 201), \
j_hund=np.linspace(0, 0.35, 71)):
slsp = Spinon(slaves=6, orbitals=3, hopping=[0.5]*6, \
populations=[1, 1, 1.5, 1.5, 1.5, 1.5])
zet = []
for hund_cu in j_hund:
zet.append(ssplt.solve_loop(slsp, interaction, hund_cu)[0][0])
np.savez('PRB_83_205112', zeta=zet, u_int=interaction, j_hund=j_hund) | Aimed at reproducing the figure in paper
L. de'Medici, PRB 83,205112 (2011)
showing the phase diagram of a 3 band hubbard with one lifted band
fixed population 1:1.5,1.5 |
def show_feature(user, feature):
FeatureFlipper = get_feature_model()
return FeatureFlipper.objects.show_feature(user, feature) | Return True/False whether the assigned feature can be displayed. This is
primarily used in the template tag to determine whether to render the
content inside itself. |
def do_flipper(parser, token):
nodelist = parser.parse(('endflipper',))
tag_name, user_key, feature = token.split_contents()
parser.delete_first_token()
return FlipperNode(nodelist, user_key, feature) | The flipper tag takes two arguments: the user to look up and the feature
to compare against. |
def render(self, context):
user = self._get_value(self.user_key, context)
feature = self._get_value(self.feature, context)
if feature is None:
return ''
allowed = show_feature(user, feature)
return self.nodelist.render(context) if allowed else '' | Handle the actual rendering. |
def _get_value(self, key, context):
string_quotes = ('"', "'")
if key[0] in string_quotes and key[-1] in string_quotes:
return key[1:-1]
    if key.isdigit():
return int(key)
return context.get(key, None) | Works out whether key is a value or if it's a variable referencing a
value in context and returns the correct value. |
def client(self, client_name, **params):
if client_name not in self.cfg.clients:
raise OAuthException('Unconfigured client: %s' % client_name)
if client_name not in ClientRegistry.clients:
        raise OAuthException('Unsupported service: %s' % client_name)
params = dict(self.cfg.clients[client_name], **params)
return ClientRegistry.clients[client_name](**params) | Initialize OAuth client from registry. |
def refresh(self, client_name, refresh_token, **params):
client = self.client(client_name, logger=self.app.logger)
    return client.get_access_token(refresh_token, grant_type='refresh_token', **params) | Exchange a refresh token for a new access token.
:param client_name: A name of one of the configured clients
:param refresh_token: The refresh token to exchange
:returns: a coroutine |
def chain(*args):
    # check if a single iterable is being passed for
    # the case that it's a generator of generators
    if len(args) == 1 and hasattr(args[0], '__iter__'):
        args = args[0]
for arg in args:
# if the arg is iterable
if hasattr(arg, '__iter__'):
# iterate through it
for i in arg:
yield i
# otherwise
else:
# yield the whole argument
yield arg | itertools.chain, just better |
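A quick usage sketch; scalars pass straight through while iterables are flattened one level:

print(list(chain(1, [2, 3], 'ab', (4,))))   # -> [1, 2, 3, 'a', 'b', 4]
print(list(chain([[1, 2], [3]])))           # -> [1, 2, 3] (single iterable of iterables)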
def get_all_celcius_commands():
p = subprocess.Popen(["crontab", "-l"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
    return [x for x in out.decode().split('\n') if 'CJOBID' in x] | Query cron for all celcius commands |
def create_option_set(cls, option_set, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._create_option_set_with_http_info(option_set, **kwargs)
else:
(data) = cls._create_option_set_with_http_info(option_set, **kwargs)
return data | Create OptionSet
Create a new OptionSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_option_set(option_set, async=True)
>>> result = thread.get()
:param async bool
:param OptionSet option_set: Attributes of optionSet to create (required)
:return: OptionSet
If the method is called asynchronously,
returns the request thread. |
def delete_option_set_by_id(cls, option_set_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_option_set_by_id_with_http_info(option_set_id, **kwargs)
else:
(data) = cls._delete_option_set_by_id_with_http_info(option_set_id, **kwargs)
return data | Delete OptionSet
Delete an instance of OptionSet by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_option_set_by_id(option_set_id, async=True)
>>> result = thread.get()
:param async bool
:param str option_set_id: ID of optionSet to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread. |
def get_option_set_by_id(cls, option_set_id, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_option_set_by_id_with_http_info(option_set_id, **kwargs)
else:
(data) = cls._get_option_set_by_id_with_http_info(option_set_id, **kwargs)
return data | Find OptionSet
Return single instance of OptionSet by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_option_set_by_id(option_set_id, async=True)
>>> result = thread.get()
:param async bool
:param str option_set_id: ID of optionSet to return (required)
:return: OptionSet
If the method is called asynchronously,
returns the request thread. |
def list_all_option_sets(cls, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_option_sets_with_http_info(**kwargs)
else:
(data) = cls._list_all_option_sets_with_http_info(**kwargs)
return data | List OptionSets
Return a list of OptionSets
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_option_sets(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[OptionSet]
If the method is called asynchronously,
returns the request thread. |
def replace_option_set_by_id(cls, option_set_id, option_set, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._replace_option_set_by_id_with_http_info(option_set_id, option_set, **kwargs)
else:
(data) = cls._replace_option_set_by_id_with_http_info(option_set_id, option_set, **kwargs)
return data | Replace OptionSet
Replace all attributes of OptionSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_option_set_by_id(option_set_id, option_set, async=True)
>>> result = thread.get()
:param async bool
:param str option_set_id: ID of optionSet to replace (required)
:param OptionSet option_set: Attributes of optionSet to replace (required)
:return: OptionSet
If the method is called asynchronously,
returns the request thread. |
def update_option_set_by_id(cls, option_set_id, option_set, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._update_option_set_by_id_with_http_info(option_set_id, option_set, **kwargs)
else:
(data) = cls._update_option_set_by_id_with_http_info(option_set_id, option_set, **kwargs)
return data | Update OptionSet
Update attributes of OptionSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_option_set_by_id(option_set_id, option_set, async=True)
>>> result = thread.get()
:param async bool
:param str option_set_id: ID of optionSet to update. (required)
:param OptionSet option_set: Attributes of optionSet to update. (required)
:return: OptionSet
If the method is called asynchronously,
returns the request thread. |
def VcardFieldsEqual(field1, field2):
    field1_vals = set(str(f.value) for f in field1)
    field2_vals = set(str(f.value) for f in field2)
    return field1_vals == field2_vals | Handle comparing vCard fields where inputs are lists of components.
Handle parameters? Are any used aside from 'TYPE'?
Note: force cast to string to compare sub-objects like Name and Address |
def VcardMergeListFields(field1, field2):
field_dict = {}
for f in field1 + field2:
field_dict[str(f)] = f
return list(field_dict.values()) | Handle merging list fields that may include some overlap. |
def SetVcardField(new_vcard, field_name, values):
for val in values:
new_field = new_vcard.add(field_name)
new_field.value = val.value
if val.params:
new_field.params = val.params
return new_vcard | Set vCard field values and parameters on a new vCard. |
def CopyVcardFields(new_vcard, auth_vcard, field_names):
for field in field_names:
value_list = auth_vcard.contents.get(field)
new_vcard = SetVcardField(new_vcard, field, value_list)
return new_vcard | Copy vCard field values from an authoritative vCard into a new one. |
def MergeVcards(vcard1, vcard2):
new_vcard = vobject.vCard()
vcard1_fields = set(vcard1.contents.keys())
vcard2_fields = set(vcard2.contents.keys())
mutual_fields = vcard1_fields.intersection(vcard2_fields)
logger.debug('Potentially conflicting fields: {}'.format(mutual_fields))
for field in mutual_fields:
val1 = vcard1.contents.get(field)
val2 = vcard2.contents.get(field)
new_values = []
if not VcardFieldsEqual(val1, val2):
# we have a conflict, if a list maybe append otherwise prompt user
if field not in MERGEABLE_FIELDS:
context_str = GetVcardContextString(vcard1, vcard2)
new_values.extend(SelectFieldPrompt(field,
context_str,
val1,
val2))
else:
new_values.extend(VcardMergeListFields(val1, val2))
else:
new_values.extend(val1)
logger.debug('Merged values for field {}: {}'.format(
field.upper(),
u(str(new_values)))
)
new_vcard = SetVcardField(new_vcard, field, new_values)
new_vcard = CopyVcardFields(new_vcard,
vcard1,
vcard1_fields - vcard2_fields)
new_vcard = CopyVcardFields(new_vcard,
vcard2,
vcard2_fields - vcard1_fields)
return new_vcard | Create a new vCard and populate it. |
def SelectFieldPrompt(field_name, context_str, *options):
option_format_str = '[ {} ] "{}"'
option_dict = {}
print(context_str)
print('Please select one of the following options for field "{}"'.format(
field_name)
)
for cnt, option in enumerate(options):
option_dict['{}'.format(cnt + 1)] = option
if not callable(option):
print(option_format_str.format(cnt + 1, u(str(option))))
else:
print(option_format_str.format(cnt + 1, option.__name__))
choice = None
while choice not in option_dict:
choice = input('option> ').strip()
new_value = option_dict[choice]
if callable(new_value):
return new_value()
else:
return new_value | Prompts user to pick from provided options.
It is possible to provide a function as an option although it is
not yet tested. This could allow a user to be prompted to provide
their own value rather than the listed options.
Args:
field_name (string): Name of the field.
context_str (string): Printed to give the user context.
options: Variable arguments, should be vobject Components
in a list. As retrieved from a vCard.contents dictionary.
Returns:
One of the options passed in. Ideally always a list. |
def make_fixture(model_class, **kwargs):
all_fields = get_fields(model_class)
    fields_for_random_generation = [
        getattr(model_class, x) for x in all_fields
    ]
overrides = {}
for kwarg, value in kwargs.items():
if kwarg in all_fields:
kwarg_field = getattr(model_class, kwarg)
fields_for_random_generation.remove(kwarg_field)
overrides.update({kwarg_field: value})
random_values = get_random_values(fields_for_random_generation)
values = dict(overrides, **random_values)
assert len(all_fields) == len(values), (
"Mismatch in values, {} != {}".format(
len(all_fields), len(values)
)
)
data = {k.name: v for k, v in values.items()}
return model_class(**data) | Take the model_klass and generate a fixure for it
Args:
model_class (MongoEngine Document): model for which a fixture
is needed
kwargs (dict): any overrides instead of random values
Returns:
dict for now, other fixture types are not implemented yet |
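A hedged usage sketch with a hypothetical MongoEngine document (assumes get_random_value can handle these field types):

import mongoengine as mongo

class User(mongo.Document):
    name = mongo.StringField()
    age = mongo.IntField()

user = make_fixture(User, name='alice')   # 'age' receives a random value
print(user.name)                          # -> alice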
def get_fields(model_class):
return [
attr for attr, value in model_class.__dict__.items()
if issubclass(type(value), (mongo.base.BaseField, mongo.EmbeddedDocumentField)) # noqa
] | Pass in a mongo model class and extract all the attributes which
are mongoengine fields
Returns:
list of strings of field attributes |
def get_random_values(fields):
values = {}
for field in fields:
try:
value = get_random_value(field)
except AttributeError:
# this can only really occur if the field is not implemented yet.
# Silencing the exception during the prototype phase
value = None
values.update({field: value})
return values | Pass in a list of fields (as strings) to get a dict with the
field name as a key and a randomly generated value as another |
def head_bucket(self, name):
try:
self.s3.head_bucket(Bucket=name)
        info = self.s3.get_bucket_website(Bucket=name)   # assumed: should check the same bucket as head_bucket
if not info:
return False, 404, "Configure improrperly"
return True, None, None
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] in ["403", "404"]:
return False, e.response["Error"]["Code"], e.response["Error"]["Message"]
else:
raise e | Check if a bucket exists
:param name:
:return: |
def purge_files(self, exclude_files=["index.html", "error.html"]):
for chunk in utils.chunk_list(self._get_manifest_data(), 1000):
try:
self.s3.delete_objects(
Bucket=self.sitename,
Delete={
'Objects': [{"Key": f} for f in chunk
if f not in exclude_files]
}
)
except Exception as ex:
            pass | Delete the files listed in the manifest.
:param exclude_files: list : files to keep (not delete)
:return: |
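utils.chunk_list is not shown in these snippets; a minimal sketch of the helper it assumes (split a list into size-n chunks):

def chunk_list(items, n):
    return [items[i:i + n] for i in range(0, len(items), n)]

assert chunk_list(list(range(5)), 2) == [[0, 1], [2, 3], [4]]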
def create_manifest_from_s3_files(self):
    files = []
    for k in self.s3.list_objects(Bucket=self.sitename)['Contents']:
        key = k["Key"]
        if key not in [self.manifest_file]:
            files.append(key)
    self._set_manifest_data(files) | Create the manifest from the files currently in the site bucket.
:return: |
def _set_manifest_data(self, files_list):
if files_list:
data = ",".join(files_list)
self.s3.put_object(Bucket=self.sitename,
Key=self.manifest_file,
Body=data,
ACL='private') | Write manifest files
:param files_list: list
:return: |