message | diff
---|---|
config_service: Removed hotlist from feedback link.
Since our hotlist is not publicly writable, we need to remove the hotlist
from the feedback link
Review-Url: | crdx('setFeedbackButtonLink', 'https://bugs.chromium.org/p/chromium/issues/' +
'[email protected],%[email protected],' +
'%[email protected],%[email protected]' +
- '&components=Infra%3EPlatform%3EConfig&labels=Infra-DX&blocking=730832' +
- '&hotlists=luci-config');
+ '&components=Infra%3EPlatform%3EConfig&labels=Infra-DX&blocking=730832');
</script>
</html>
|
Add/refactor unit tests for the CloudFormation API wrapper
SIM:
CR: | # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-import unittest
import mock
+from pytest_socket import disable_socket, enable_socket
+import unittest
-from ebcli.lib import aws, cloudformation
+from ebcli.lib import cloudformation
+
+from .. import mock_responses
class TestCloudFormation(unittest.TestCase):
+ def setUp(self):
+ disable_socket()
+
+ def tearDown(self):
+ enable_socket()
- def test_wait_until_stack_exists__stack_does_not_exist(self):
- describe_stacks_response = {
- 'Stacks': [
- {
- 'StackId': 'stack_id',
- 'StackName': 'stack_name',
- 'Description': "my cloud formation stack",
- 'Parameters': []
- }
- ]
- }
- aws.make_api_call = mock.MagicMock(return_value=describe_stacks_response)
+ @mock.patch('ebcli.lib.cloudformation.aws.make_api_call')
+ def test_wait_until_stack_exists__stack_does_not_exist(
+ self,
+ make_api_call_mock
+ ):
+ make_api_call_mock.return_value = mock_responses.DESCRIBE_STACKS_RESPONSE
with self.assertRaises(cloudformation.CFNTemplateNotFound) as exception:
cloudformation.wait_until_stack_exists('non_existent_stack', timeout=0)
@@ -39,17 +41,23 @@ class TestCloudFormation(unittest.TestCase):
"Could not find CFN stack, 'non_existent_stack'."
)
- def test_wait_until_stack_exists__stack_exists(self):
- describe_stacks_response = {
- 'Stacks': [
- {
- 'StackId': 'stack_id',
- 'StackName': 'stack_name',
- 'Description': "my cloud formation stack",
- 'Parameters': []
- }
- ]
- }
- aws.make_api_call = mock.MagicMock(return_value=describe_stacks_response)
+ @mock.patch('ebcli.lib.cloudformation.aws.make_api_call')
+ def test_wait_until_stack_exists__stack_exists(
+ self,
+ make_api_call_mock
+ ):
+ make_api_call_mock.return_value = mock_responses.DESCRIBE_STACKS_RESPONSE
cloudformation.wait_until_stack_exists('stack_name')
+
+ @mock.patch('ebcli.lib.cloudformation.aws.make_api_call')
+ def test_get_template(
+ self,
+ make_api_call_mock
+ ):
+ make_api_call_mock.return_value = mock_responses.GET_TEMPLATE_RESPONSE
+
+ self.assertEqual(
+ mock_responses.GET_TEMPLATE_RESPONSE,
+ cloudformation.get_template('mystackname')
+ )
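The inline `describe_stacks` payload the old tests built by hand is presumably what this refactor moves into the shared `mock_responses` module. A hypothetical sketch of that fixture, inferred from the removed dict (the real module may carry more entries and fields):

```python
# Hypothetical contents of the tests' mock_responses module -- inferred from the
# inline dict the old tests used; the actual fixture module may differ.
DESCRIBE_STACKS_RESPONSE = {
    'Stacks': [
        {
            'StackId': 'stack_id',
            'StackName': 'stack_name',
            'Description': 'my cloud formation stack',
            'Parameters': []
        }
    ]
}

GET_TEMPLATE_RESPONSE = {
    'TemplateBody': {'Resources': {}}   # placeholder; exact shape assumed
}
```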
|
Commentary about size constraints on TensorImpl.
Summary:
Pull Request resolved: | @@ -1311,4 +1311,50 @@ protected:
bool reserved_ = false;
};
+
+// Note [TensorImpl size constraints]
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+// Changed the size of TensorImpl? If the size went down, good for
+// you! Adjust the documentation below and the expected size.
+// Did it go up? Read on...
+//
+// Struct size matters. In some production systems at Facebook, we have
+// 400M live tensors during a training run. Do the math: every 64-bit
+// word you add to Tensor is an extra 3.2 gigabytes in RAM.
+//
+// If you are a Facebook employee, you can check if the run in question
+// has tipped you over the point using the command here:
+// https://fburl.com/q5enpv98
+//
+// For reference, we OOMed at 160 bytes (20 words) per TensorImpl.
+// This is not counting overhead from strides out-of-line allocation, and
+// StorageImpl space. We're currently comfortably under this number;
+// let's keep it that way. (One currently approved pending size
+// increase is inlining sizes and strides as small vectors, to reduce
+// dynamic allocations.)
+//
+// Our memory usage on 32-bit systems is suboptimal, but we're not checking
+// for it at the moment (to help avoid rage inducing cycles when the
+// 32-bit number is wrong).
+//
+// Current breakdown:
+//
+// vtable pointer
+// strong refcount TODO: pack these into one word
+// weak refcount
+// storage pointer
+// sizes vector (start)
+// sizes vector (end)
+// sizes vector (reserved) TODO: get rid of me
+// strides pointer
+// storage offset
+// numel
+// data type pointer
+// miscellaneous bitfield
+//
+static_assert(sizeof(void*) != sizeof(int64_t) || // if 64-bit...
+ sizeof(TensorImpl) == sizeof(int64_t) * 12,
+ "You changed the size of TensorImpl on 64-bit arch."
+ "See Note [TensorImpl size constraints] on how to proceed.");
+
} // namespace at
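A quick Python check of the arithmetic quoted in the note:

```python
# 400M live tensors, one extra 64-bit (8-byte) word each.
live_tensors = 400_000_000
extra_bytes = live_tensors * 8
print(extra_bytes / 1e9)     # 3.2 (gigabytes of extra RAM)

# The asserted budget: 12 words of 8 bytes = 96 bytes per TensorImpl,
# comfortably under the 160-byte (20-word) level where the OOM was observed.
print(12 * 8, 20 * 8)        # 96 160
```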
|
Fix
Turns out the issue was in the way lambdas work internally upon having an empty stack. | @@ -229,9 +229,13 @@ def _safe_apply(function, *args):
'''
args = reverse(args)
if function.__name__ == "_lambda":
- return function(list(args), len(args))[-1]
+ ret = function(list(args), len(args))
+ if len(ret): return ret[-1]
+ else: return []
elif function.__name__.startswith("FN_"):
- return function(list(args))[-1]
+ ret = function(list(args))[-1]
+ if len(ret): return ret[-1]
+ else: return []
return function(*args)
def _two_argument(function, lhs, rhs):
'''
@@ -2009,9 +2013,10 @@ else:
compiled += tab("context_values.append(parameters);") + NEWLINE
compiled += tab("input_values[input_level] = [stack[::], 0]") + NEWLINE
compiled += tab(VY_compile(VALUE[VyParse.LAMBDA_BODY])) + NEWLINE
+ compiled += tab("ret = [pop(stack)]") + NEWLINE
compiled += tab("context_level -= 1; context_values.pop()") + NEWLINE
compiled += tab("input_level -= 1;") + NEWLINE
- compiled += tab("return [stack[-1]]") + NEWLINE
+ compiled += tab("return ret") + NEWLINE
compiled += "stack.append(_lambda)"
elif NAME == VyParse.LIST_STMT:
compiled += "temp_list = []" + NEWLINE
|
Made js-console work with windows
Got rid of the condition where the process is considered to be done when stdout or stderr outputs | @@ -214,10 +214,6 @@ def execjs(js, jslib, timeout=None, force_docker_pull=False, debug=False, js_con
error_thread.daemon=True
error_thread.start()
- # mark if output/error is ready
- output_ready=False
- error_ready=False
-
while (len(wselect) + len(rselect)) > 0:
try:
if nodejs.stdin in wselect:
@@ -227,29 +223,16 @@ def execjs(js, jslib, timeout=None, force_docker_pull=False, debug=False, js_con
wselect = []
if nodejs.stdout in rselect:
if not output_queue.empty():
- output_ready = True
stdout_buf.write(output_queue.get())
- elif output_ready:
- rselect = []
- no_more_output.release()
- no_more_error.release()
- output_thread.join()
if nodejs.stderr in rselect:
if not error_queue.empty():
- error_ready = True
stderr_buf.write(error_queue.get())
- elif error_ready:
- rselect = []
- no_more_output.release()
- no_more_error.release()
- output_thread.join()
- error_thread.join()
+
if stdout_buf.getvalue().endswith("\n".encode()):
rselect = []
no_more_output.release()
no_more_error.release()
- output_thread.join()
except OSError as e:
break
|
Bugfix: close() would sometimes hang for RtMidi input ports.
This change was somehow not included in the earlier commit that claimed to
have fixed this. | @@ -169,6 +169,7 @@ class Port(ports.BaseIOPort):
self._midiin.ignore_types(False, False, True)
self._queue = queue.Queue()
self._parser = Parser()
+ if self.callback is not None:
self.callback = callback
if is_output:
|
Minor: add static_assert to Pickler buffering.
Summary:
Pull Request resolved:
This is followup on the pickler buffering change.
ghstack-source-id:
Test Plan: This just adds a static_assert, hence if it builds, we're good. | @@ -201,12 +201,14 @@ class Pickler {
// the left of a '::', its type cannot be deduced by the compiler so one must
// explicitly instantiate the template, i.e. push<int>(int) works, push(int)
// does not)
+ static constexpr size_t kBufferSize = 256;
template <typename T>
void push(typename std::common_type<T>::type value) {
const char* begin = reinterpret_cast<const char*>(&value);
if (bufferPos_ + sizeof(T) > buffer_.size()) {
flushNonEmpty();
}
+ static_assert(sizeof(T) <= kBufferSize, "Buffer size assumption");
memcpy(buffer_.data() + bufferPos_, begin, sizeof(T));
bufferPos_ += sizeof(T);
}
@@ -216,7 +218,7 @@ class Pickler {
std::function<void(const char*, size_t)> writer_;
// Buffer to avoid calling a writer_ on a per-byte basis.
- std::array<char, 256> buffer_;
+ std::array<char, kBufferSize> buffer_;
size_t bufferPos_{0};
// Stack of opcodes/data
|
Added thresholding option to correlation mode
Relates to | @@ -1463,7 +1463,7 @@ class UHFQC_correlation_detector(UHFQC_integrated_average_detector):
def __init__(self, UHFQC, AWG=None, integration_length=1e-6,
nr_averages=1024, rotate=False, real_imag=True,
channels=[0, 1], correlations=[(0, 1)],
- seg_per_point=1, single_int_avg=False,
+ seg_per_point=1, single_int_avg=False, thresholding=False,
**kw):
super(UHFQC_correlation_detector, self).__init__(
UHFQC, AWG=AWG, integration_length=integration_length,
@@ -1473,6 +1473,7 @@ class UHFQC_correlation_detector(UHFQC_integrated_average_detector):
cross_talk_suppression=False,
**kw)
self.correlations = correlations
+ self.thresholding = thresholding
self.value_names = []
for ch in channels:
@@ -1530,8 +1531,13 @@ class UHFQC_correlation_detector(UHFQC_integrated_average_detector):
self.correlation_channels += [correlation_channel]
def set_up_correlation_weights(self):
- # FIXME: add option to use thresholding (quex_rl_soure(5))
- self.UHFQC.quex_rl_source(4) # -> correlations mode before threshold
+ if self.thresholding:
+ # correlations mode after threshold
+            # NOTE: thresholds need to be set outside the detector object.
+ self.UHFQC.quex_rl_source(5)
+ else:
+ # correlations mode before threshold
+ self.UHFQC.quex_rl_source(4)
# Configure correlation mode
for correlation_channel, corr in zip(self.correlation_channels,
self.correlations):
@@ -1552,12 +1558,17 @@ class UHFQC_correlation_detector(UHFQC_integrated_average_detector):
self.UHFQC.set('quex_corr_{}_mode'.format(correlation_channel), 1)
self.UHFQC.set('quex_corr_{}_source'.format(correlation_channel),
corr[1])
+ # If thresholding is enabled, set the threshold for the correlation
+ # channel.
+ if self.thresholding:
+ thresh_level = \
+ self.UHFQC.get('quex_thres_{}_level'.format(corr[0]))
+ self.UHFQC.set(
+ 'quex_thres_{}_level'.format(correlation_channel),
+ thresh_level)
def get_values(self):
# Slightly different way to deal with scaling factor
- self.scaling_factor = 1 / \
- (1.8e9*self.integration_length*self.nr_averages)
-
self.scaling_factor = 1 / (1.8e9*self.integration_length)
if self.AWG is not None:
@@ -1573,13 +1584,17 @@ class UHFQC_correlation_detector(UHFQC_integrated_average_detector):
acquisition_time=0.01)
data = []
+ if self.thresholding:
+ for key in data_raw.keys():
+ data.append(np.array(data_raw[key]))
+ else:
for key in data_raw.keys():
if key in self.correlation_channels:
- data.append(np.array(data_raw[key]) * (self.scaling_factor**2 /
- self.nr_averages))
+ data.append(np.array(data_raw[key]) *
+ (self.scaling_factor**2 / self.nr_averages))
else:
- data.append(np.array(data_raw[key]) * (self.scaling_factor /
- self.nr_averages))
+ data.append(np.array(data_raw[key]) *
+ (self.scaling_factor / self.nr_averages))
if not self.real_imag:
I = data[0]
|
Update server requirements
Via and | @@ -31,9 +31,9 @@ You can deploy and run TiDB on the 64-bit generic hardware server platform in th
| Component | CPU | Memory | Local Storage | Network | Instance Number (Minimum Requirement) |
| :------: | :-----: | :-----: | :----------: | :------: | :----------------: |
-| TiDB | 16 core+ | 16 GB+ | SAS, 200 GB+ | Gigabit network card | 1 |
-| PD | 16 core+ | 16 GB+ | SAS, 200 GB+ | Gigabit network card | - |
-| TiKV | 16 core+ | 32 GB+ | SAS, 200 GB+ | Gigabit network card | 3 |
+| TiDB | 8 core+ | 16 GB+ | SAS, 200 GB+ | Gigabit network card | 1 (can be deployed on the same machine with PD) |
+| PD | 8 core+ | 16 GB+ | SAS, 200 GB+ | Gigabit network card | 1 (can be deployed on the same machine with TiDB) |
+| TiKV | 8 core+ | 32 GB+ | SAS, 200 GB+ | Gigabit network card | 3 |
| | | | | Total Server Number | 4 |
> **Note**:
@@ -43,19 +43,19 @@ You can deploy and run TiDB on the 64-bit generic hardware server platform in th
### Production environment
-| Component | CPU | Memory | Hard Disk Type | Single Hard Disk Size | Network | Instance Number (Minimum Requirement) |
-| :-----: | :------: | :------: | :------: | :------: | :------: | :-----: |
-| TiDB | 32 core+ | 128 GB+ | SSD | 500 GB+ | 2 or more 10 Gigabit network cards | 2 |
-| PD | 16 core+ | 32 GB+ | SSD | 200 GB+ | 2 or more 10 Gigabit network cards | 3 |
-| TiKV | 32 core+ | 128 GB+ | SSD | 200~500 GB. | 2 or more 10 Gigabit network cards | 3 |
-| Monitor | 16 core+ | 32 GB+ | SAS | 200 GB+ | 2 or more Gigabit network cards | 1 |
+| Component | CPU | Memory | Hard Disk Type | Network | Instance Number (Minimum Requirement) |
+| :-----: | :------: | :------: | :------: | :------: | :-----: |
+| TiDB | 16 core+ | 48 GB+ | SAS | 10 Gigabit network card (2 preferred) | 2 |
+| PD | 8 core+ | 16 GB+ | SSD | 10 Gigabit network card (2 preferred) | 3 |
+| TiKV | 16 core+ | 48 GB+ | SSD | 10 Gigabit network card (2 preferred) | 3 |
+| Monitor | 8 core+ | 16 GB+ | SAS | Gigabit network card | 1 |
| | | | | | Total Server Number | 9 |
> **Note**:
>
> - In the production environment, you can deploy and run TiDB and PD on the same server. If you have a higher requirement for performance and reliability, try to deploy them separately.
> - It is strongly recommended to use higher configuration in the production environment.
-> - It is recommended to keep the size of TiKV hard disk within 500G in case it takes too long to restore data when the hard disk is damaged.
+> - It is recommended to keep the size of TiKV hard disk within 800G in case it takes too long to restore data when the hard disk is damaged.
## Network requirements
|
Add --self option to tlmgr update command
Also updates tlmgr infrastructure | @@ -88,4 +88,4 @@ a good idea to periodically update the TeXLive packages by running:
.. code-block:: shell
- /opt/texlive/bin/x86_64-linux/tlmgr update --all
+ /opt/texlive/bin/x86_64-linux/tlmgr update --self --all
|
Add reviewers filter
Only includes those that have left a review | @@ -77,6 +77,12 @@ def get_round_leads(request):
return User.objects.filter(round_lead__isnull=False).distinct()
+def get_reviewers(request):
+ """ All users that have left a review """
+ User = get_user_model()
+ return User.objects.filter(review__isnull=False).distinct()
+
+
class Select2CheckboxWidgetMixin(filters.Filter):
def __init__(self, *args, **kwargs):
label = kwargs.get('label')
@@ -97,6 +103,7 @@ class SubmissionFilter(filters.FilterSet):
funds = Select2ModelMultipleChoiceFilter(name='page', queryset=get_used_funds, label='Funds')
status = Select2MultipleChoiceFilter(name='status__contains', choices=status_options, label='Statuses')
lead = Select2ModelMultipleChoiceFilter(queryset=get_round_leads, label='Leads')
+ reviewers = Select2ModelMultipleChoiceFilter(queryset=get_reviewers, label='Reviewers')
class Meta:
model = ApplicationSubmission
|
fix(cz/conventional_commits): optionally expect '!' right before ':' in schema_pattern
Closes: | @@ -178,8 +178,8 @@ class ConventionalCommitsCz(BaseCommitizen):
def schema_pattern(self) -> str:
PATTERN = (
- r"(build|ci|docs|feat|fix|perf|refactor|style|test|chore|revert|bump)!?"
- r"(\(\S+\))?:(\s.*)"
+ r"(build|ci|docs|feat|fix|perf|refactor|style|test|chore|revert|bump)"
+ r"(\(\S+\))?!?:(\s.*)"
)
return PATTERN
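A quick check of what the reordered pattern accepts, using the regex exactly as defined above:

```python
# The '!' is now expected after the optional scope, immediately before ':'.
import re

PATTERN = (
    r"(build|ci|docs|feat|fix|perf|refactor|style|test|chore|revert|bump)"
    r"(\(\S+\))?!?:(\s.*)"
)

assert re.match(PATTERN, "feat(api)!: drop deprecated endpoints")
assert re.match(PATTERN, "fix: handle empty payloads")
assert not re.match(PATTERN, "feat!(api): bang in the old position")
```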
|
Fixing TTSN unit tests
got lost in rebase | @@ -796,8 +796,8 @@ Caffe2Ops Caffe2Backend::CreateMatMul(OnnxNode* onnx_node, int opset_version) {
//==============================================
// Rest of the member funtions for Caffe2Backend
//==============================================
-std::unordered_set<std::string> Caffe2Backend::AllNamesInGraph(
- const GraphProto& graph) {
+std::unordered_set<std::string>
+Caffe2Backend::AllNamesInGraph(const GraphProto &graph) {
std::unordered_set<std::string> names;
for (const auto& input : graph.input()) {
|
Change it back to checking just the statuses but have it write out the
result so people can see the reason in case of failures | @@ -91,6 +91,9 @@ def test_checks_api(dcos_api_session):
# check that the returned statuses of each check is 0
expected_status = {c: 0 for c in checks.keys()}
response_status = {c: v['status'] for c, v in results['checks'].items()}
+
+ # print out the response for debugging
+ logging.info('Response: {}'.format(results))
assert expected_status == response_status
# check that overall status is also 0
|
Point preview statsd at tools
We are running a statsd exporter on tools to collect all our statsd
metrics for scraping by Prometheus
Update preview to point there instead of at the local one which has
issues with redeployment and DNS changing | @@ -70,7 +70,7 @@ applications:
AWS_SECRET_ACCESS_KEY: '{{ AWS_SECRET_ACCESS_KEY }}'
{% if environment == 'preview' %}
- STATSD_HOST: "notify-statsd-exporter-{{ environment }}.apps.internal"
+ STATSD_HOST: "statsd.notify.tools"
STATSD_PREFIX: ""
{% else %}
STATSD_HOST: "statsd.hostedgraphite.com"
|
Fix crash in WaitingPeers.put_nowait() when peer is not alive
Closes: | @@ -16,6 +16,7 @@ from eth_utils import (
)
from p2p.abc import CommandAPI
+from p2p.exceptions import PeerConnectionLost
from p2p.exchange import PerformanceAPI
from trinity.protocol.common.peer import BaseChainPeer
@@ -79,7 +80,11 @@ class WaitingPeers(Generic[TChainPeer]):
return sum(scores) / len(scores)
def put_nowait(self, peer: TChainPeer) -> None:
- self._waiting_peers.put_nowait(self._peer_wrapper(peer))
+ try:
+ wrapped_peer = self._peer_wrapper(peer)
+ except PeerConnectionLost:
+ return
+ self._waiting_peers.put_nowait(wrapped_peer)
async def get_fastest(self) -> TChainPeer:
wrapped_peer = await self._waiting_peers.get()
|
Add FIXME
Summary: Add a cross-reference to the subselection Github issues
Test Plan: N/A
Reviewers: schrockn, alangenfeld, prha | @@ -208,6 +208,11 @@ def multiprocess_executor(init_context):
check_cross_process_constraints(init_context)
+ # ExecutionTargetHandle.get_handle returns an ExecutionTargetHandleCacheEntry, which is a tuple
+ # (handle, solid_subset). Right now we are throwing away the solid_subset that we store in the
+ # cache -- this is fragile and we should fix this with
+ # https://github.com/dagster-io/dagster/issues/2115 and friends so there are not multiple
+ # sources of truth for the solid subset
handle, _ = ExecutionTargetHandle.get_handle(init_context.pipeline_def)
return MultiprocessExecutorConfig(
handle=handle,
|
Allow the use of sub-batching when using regularizers
The regularizer is reset after every sub-batch. This frees the computational graph and allows a new backward pass for the next sub-batch. | @@ -234,6 +234,9 @@ class TrainingLoop(ABC):
loss.backward()
current_epoch_loss += loss.item()
+ # reset the regularizer to free the computational graph
+ self.model.regularizer.reset()
+
# update parameters according to optimizer
self.optimizer.step()
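A minimal sketch of why the reset matters, assuming a regularizer that accumulates a penalty term across updates; the class and names below are illustrative, not the library's API:

```python
# Each sub-batch builds its own autograd graph; if the regularizer keeps adding
# to one running term, every old graph stays reachable until that term is
# backpropagated. Resetting after each sub-batch's backward() frees them.
import torch

class AccumulatingRegularizer:                     # illustrative stand-in
    def __init__(self, weight: float = 0.01):
        self.weight = weight
        self.term = torch.zeros(())

    def update(self, *tensors: torch.Tensor) -> None:
        self.term = self.term + sum(t.pow(2).sum() for t in tensors)

    @property
    def penalty(self) -> torch.Tensor:
        return self.weight * self.term

    def reset(self) -> None:
        self.term = torch.zeros(())                # drop the accumulated graph

weights = torch.randn(4, 3, requires_grad=True)
regularizer = AccumulatingRegularizer()

for _ in range(3):                                 # three sub-batches
    loss = weights.sum()                           # stand-in for the model loss
    regularizer.update(weights)
    (loss + regularizer.penalty).backward()
    regularizer.reset()                            # free graph before next pass
```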
|
Adds enable_old_python_results_unpickling() function.
Updates the module-name-manipulation that is needed for old-version
Results pickles to load and gathers this logic into a new
enable_old_python_results_unpickling function which now must be
called before old-version Results pickles will load. | @@ -400,14 +400,16 @@ class Results(object):
' Please update this call with one to:\n'
' pygsti.report.create_general_report(...)\n'))
-class ResultOptions(object):
- """ Unused. Exists for sole purpose of loading old Results pickles """
- pass
+
+def enable_old_python_results_unpickling():
#Define empty ResultCache class in resultcache module to enable loading old Results pickles
import sys as _sys
class dummy_ResultCache(object): pass
+ class dummy_ResultOptions(object): pass
class dummy_resultcache_module(object):
def __init__(self):
self.ResultCache = dummy_ResultCache
+ _sys.modules[__name__].ResultOptions = dummy_ResultOptions
_sys.modules['pygsti.report.resultcache'] = dummy_resultcache_module()
+ _sys.modules['pygsti.report.results'] = _sys.modules[__name__]
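The underlying trick in generic form (module and class names below are made up for illustration): pickle resolves classes by their recorded `module.ClassName` path, so loading an old pickle whose classes have moved or disappeared means planting stand-ins at those old paths first.

```python
# Generic sketch of the sys.modules aliasing technique; not pyGSTi's code.
import sys
import types

def enable_legacy_unpickling():
    legacy = types.ModuleType("oldpkg.results")    # the path old pickles recorded

    class _DummyResultOptions:                     # stand-in for a removed class
        pass

    legacy.ResultOptions = _DummyResultOptions
    sys.modules["oldpkg.results"] = legacy
    # pickle.load(...) can now resolve "oldpkg.results.ResultOptions" again.

enable_legacy_unpickling()
```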
|
ExplicitConsdes: call dtors if marker before exit
We need to make sure the called function is not _exit or _Exit,
as atexit-registered functions are not called in these cases. | @@ -204,12 +204,29 @@ class ExplicitConsdes : public ModulePass {
if (!called)
continue;
- if (isMarkExit(called)) {
- dtorsBefore.push_back(call);
- }
+ if (!isExit(called))
+ continue;
- if (isExit(called) && !hasMarkExit) {
+ if (!hasMarkExit) {
dtorsBefore.push_back(call);
+ } else {
+ // look for __INSTR_mark_exit before this call
+ auto *prev = inst.getPrevNonDebugInstruction();
+ auto *markCall = dyn_cast<CallInst>(prev);
+ if (!markCall)
+ continue;
+
+ if (markCall->isIndirectCall())
+ continue;
+
+ Function *markCalled = markCall->getCalledFunction();
+ if (!markCalled)
+ continue;
+
+ if (!isMarkExit(markCalled))
+ continue;
+
+ dtorsBefore.push_back(markCall);
}
}
}
|
[batch] handle events with no operation
These are always delete operations that have failed due to the worker not
existing. | @@ -556,7 +556,13 @@ journalctl -u docker.service > dockerd.log
log.warning(f'unknown event resource type {resource_type}')
return
- operation_started = event['operation'].get('first', False)
+ operation = event.get('operation')
+ if operation is None:
+ # occurs when deleting a worker that does not exist
+ log.info(f'received an event with no operation {json.dumps(event)}')
+ return
+
+ operation_started = operation.get('first', False)
if operation_started:
event_type = 'STARTED'
else:
@@ -622,9 +628,12 @@ timestamp >= "{mark}"
'pageSize': 100,
'filter': filter
}):
+ try:
# take the last, largest timestamp
new_mark = event['timestamp']
await self.handle_event(event)
+ except Exception as exc:
+ raise ValueError(f'exception while handling {json.dumps(event)}') from exc
if new_mark is not None:
await self.db.execute_update(
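The two defensive patterns in this change, reduced to a generic sketch with illustrative names:

```python
import json
import logging

def handle_event(event: dict) -> None:
    operation = event.get('operation')
    if operation is None:
        # e.g. a delete issued for a worker that no longer exists
        logging.info('received an event with no operation %s', json.dumps(event))
        return
    ...

def process(events) -> None:
    for event in events:
        try:
            handle_event(event)
        except Exception as exc:
            # attach the offending payload so the failure is debuggable
            raise ValueError(f'exception while handling {json.dumps(event)}') from exc
```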
|
Disable deploy pipeline
Summary: Disabling this until we're ready to consume the artifacts produced by the Scala builds
Test Plan: n/a
Reviewers: max, alangenfeld, prha, schrockn | @@ -523,10 +523,6 @@ def coverage_step():
)
-def deploy_trigger_step():
- return {'label': 'Deploy Trigger', 'trigger': 'deploy', 'branches': 'master', 'async': True}
-
-
def pylint_steps():
res = []
@@ -651,9 +647,7 @@ def releasability_tests():
steps += releasability_tests()
if DO_COVERAGE:
- steps += [wait_step(), coverage_step(), wait_step(), deploy_trigger_step()]
- else:
- steps += [wait_step(), deploy_trigger_step()]
+ steps += [wait_step(), coverage_step()]
print(
yaml.dump(
|
Added missing ESP f/w for Kona EV
Stoteler##4896 DongleID/route 9e02651652ef6c6f|2021-02-15--10-15-28 | @@ -339,7 +339,10 @@ FW_VERSIONS = {
(Ecu.transmission, 0x7e1, None): [b'\xf1\x816U2VE051\x00\x00\xf1\x006U2V0_C2\x00\x006U2VE051\x00\x00DOS4T16NS3\x00\x00\x00\x00', ],
},
CAR.KONA_EV: {
- (Ecu.esp, 0x7D1, None): [b'\xf1\x00OS IEB \r 105\x18\t\x18 58520-K4000\xf1\xa01.05', ],
+ (Ecu.esp, 0x7D1, None): [
+ b'\xf1\x00OS IEB \r 105\x18\t\x18 58520-K4000\xf1\xa01.05',
+ b'\xf1\x00OS IEB \x03 212 \x11\x13 58520-K4000\xf1\xa02.12',
+ ],
(Ecu.fwdCamera, 0x7C4, None): [b'\xf1\x00OSE LKAS AT EUR LHD 1.00 1.00 95740-K4100 W40', ],
(Ecu.eps, 0x7D4, None): [b'\xf1\x00OS MDPS C 1.00 1.04 56310K4050\x00 4OEDC104', ],
(Ecu.fwdRadar, 0x7D0, None): [b'\xf1\x00OSev SCC F-CUP 1.00 1.01 99110-K4000 \xf1\xa01.01', ],
|
Update smokeloader.txt
>nymaim | @@ -1567,12 +1567,6 @@ privacy-tools-for-you-802.com
privacy-tools-for-you-900.com
privacy-tools-for-you-901.com
-# Reference: https://www.virustotal.com/gui/file/0003d39fdeaf2d242c347bc8bf5d8bebe911897349e1406cbb6e219d5c831cd7/detection
-
-http://107.182.129.235
-http://171.22.30.106
-http://85.31.46.167
-
# Generic trails
/advlogs9579/
|
NL Capacity Update
No source update required | ]
],
"capacity": {
- "biomass": 1243,
- "coal": 4631,
- "gas": 15570,
+ "biomass": 1280,
+ "coal": 4000,
+ "gas": 15496,
+ "geothermal": 0,
"hydro": 38,
"hydro storage": 0,
"nuclear": 486,
- "solar": 3937,
- "wind": 4626
+ "oil": 0,
+ "solar": 5710,
+ "wind": 4930
},
"contributors": [
"https://github.com/corradio",
- "https://github.com/PaulCornelissen"
+ "https://github.com/PaulCornelissen",
+ "https://github.com/nessie2013"
],
"parsers": {
"consumption": "ENTSOE.fetch_consumption",
|
test_signup: Attach prereg_user to confirmation obj.
Tests attached a UserProfile to confirmation objects,
which is not representative, since this is the only place
where that was done. Now we attach a PreregistrationUser to
the confirmation object, making the tests correct. | @@ -1467,8 +1467,12 @@ so we didn't send them an invitation. We did send invitations to everyone else!"
# make sure users can't take a valid confirmation key from another
# pathway and use it with the invitation url route
def test_confirmation_key_of_wrong_type(self) -> None:
- user = self.example_user('hamlet')
- url = create_confirmation_link(user, 'host', Confirmation.USER_REGISTRATION)
+ email = self.nonreg_email("alice")
+ realm = get_realm('zulip')
+ inviter = self.example_user('iago')
+ prereg_user = PreregistrationUser.objects.create(
+ email=email, referred_by=inviter, realm=realm)
+ url = create_confirmation_link(prereg_user, 'host', Confirmation.USER_REGISTRATION)
registration_key = url.split('/')[-1]
# Mainly a test of get_object_from_key, rather than of the invitation pathway
@@ -1477,7 +1481,7 @@ so we didn't send them an invitation. We did send invitations to everyone else!"
self.assertEqual(cm.exception.error_type, ConfirmationKeyException.DOES_NOT_EXIST)
# Verify that using the wrong type doesn't work in the main confirm code path
- email_change_url = create_confirmation_link(user, 'host', Confirmation.EMAIL_CHANGE)
+ email_change_url = create_confirmation_link(prereg_user, 'host', Confirmation.EMAIL_CHANGE)
email_change_key = email_change_url.split('/')[-1]
url = '/accounts/do_confirm/' + email_change_key
result = self.client_get(url)
@@ -1485,8 +1489,12 @@ so we didn't send them an invitation. We did send invitations to everyone else!"
"confirmation link in the system."], result)
def test_confirmation_expired(self) -> None:
- user = self.example_user('hamlet')
- url = create_confirmation_link(user, 'host', Confirmation.USER_REGISTRATION)
+ email = self.nonreg_email("alice")
+ realm = get_realm('zulip')
+ inviter = self.example_user('iago')
+ prereg_user = PreregistrationUser.objects.create(
+ email=email, referred_by=inviter, realm=realm)
+ url = create_confirmation_link(prereg_user, 'host', Confirmation.USER_REGISTRATION)
registration_key = url.split('/')[-1]
conf = Confirmation.objects.filter(confirmation_key=registration_key).first()
|
fix pytorch mobile build
Summary:
Pull Request resolved:
add a missing file and fix a std::to_string call.
Test Plan: buck build //xplat/caffe2:torchAndroid#android-armv7,shared | @@ -385,7 +385,7 @@ std::string mangleMethodName(
for (size_t method_idx = 0;; method_idx++) {
auto mangled = method_name;
if (method_idx != 0) {
- mangled += std::to_string(method_idx);
+ mangled += c10::to_string(method_idx);
}
bool found = false;
for (Function* fn : mod_type->methods()) {
|
1.1.5 hotfix changelog
Summary:
Changelog for an OSS release that includes the partitions page fix. | # Changelog
+# 1.1.5 (core) / 0.17.5 (libraries)
+
+### Bugfixes
+
+- [dagit] Fixed an issue where the Partitions tab sometimes failed to load for asset jobs.
+
# 1.1.4 (core) / 0.17.4 (libraries)
### Community Contributions
|
Add MockAsyncWebhook to mock `discord.Webhook` objects
I have added a mock type to mock `discord.Webhook` instances. Note
that the current type is specifically meant to mock webhooks that
use an AsyncAdaptor and therefore has AsyncMock/coroutine mocks for
the "maybe-coroutine" methods specified in the `discord.py` docs. | @@ -502,3 +502,24 @@ class MockReaction(CustomMockMixin, unittest.mock.MagicMock):
self.message = kwargs.get('message', MockMessage())
self.users = AsyncIteratorMock(kwargs.get('users', []))
+
+webhook_instance = discord.Webhook(data=unittest.mock.MagicMock(), adapter=unittest.mock.MagicMock())
+
+
+class MockAsyncWebhook(CustomMockMixin, unittest.mock.MagicMock):
+ """
+ A MagicMock subclass to mock Webhook objects using an AsyncWebhookAdapter.
+
+ Instances of this class will follow the specifications of `discord.Webhook` instances. For
+ more information, see the `MockGuild` docstring.
+ """
+
+ def __init__(self, **kwargs) -> None:
+ super().__init__(spec_set=webhook_instance, **kwargs)
+
+ # Because Webhooks can also use a synchronous "WebhookAdapter", the methods are not defined
+ # as coroutines. That's why we need to set the methods manually.
+ self.send = AsyncMock()
+ self.edit = AsyncMock()
+ self.delete = AsyncMock()
+ self.execute = AsyncMock()
|
Rewrite TestCompleter to not inherit from BaseControllerTest
SIM:
CR: | # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
+import os
+import shutil
import mock
-
-from .basecontrollertest import BaseControllerTest
+from pytest_socket import disable_socket, enable_socket
+import unittest
from ebcli.core import fileoperations
+from ebcli.core.ebcore import EB
from ebcli.objects.solutionstack import SolutionStack
-from ebcli.objects.tier import Tier
-class TestCompleter(BaseControllerTest):
- solution = SolutionStack('64bit Amazon Linux 2014.03 '
- 'v1.0.6 running PHP 5.5')
- app_name = 'ebcli-test-app'
- tier = Tier.get_default()
+class TestCompleter(unittest.TestCase):
+ solution = SolutionStack('64bit Amazon Linux 2014.03 v1.0.6 running PHP 5.5')
+ app_name = 'ebcli-intTest-app'
def setUp(self):
- self.module_name = 'create'
- super(TestCompleter, self).setUp()
- fileoperations.create_config_file(self.app_name, 'us-west-2',
- self.solution.name)
+ disable_socket()
+ self.root_dir = os.getcwd()
+ if not os.path.exists('testDir'):
+ os.mkdir('testDir')
+
+ os.chdir('testDir')
+
+ fileoperations.create_config_file(
+ self.app_name,
+ 'us-west-2',
+ self.solution.name
+ )
- def test_base_commands(self):
- """
- testing for base controllers
- """
+ def tearDown(self):
+ os.chdir(self.root_dir)
+ shutil.rmtree('testDir')
- # run cmd
- self.run_command('completer', '--cmplt', ' ')
+ enable_socket()
- output = set(self.mock_output.call_args[0])
+ @mock.patch('ebcli.core.io.echo')
+ def test_base_commands(
+ self,
+ echo_mock
+ ):
+ self.app = EB(argv=['completer', '--cmplt', ' '])
+ self.app.setup()
+ self.app.run()
# Its best to hard code this
# That way we can make sure this list matches exactly
@@ -71,4 +84,4 @@ class TestCompleter(BaseControllerTest):
'tags'
}
- self.assertEqual(expected, output)
+ self.assertEqual(expected, set(echo_mock.call_args[0]))
|
fix recognition on stages not consuming AP
close | @@ -31,7 +31,7 @@ def recognize(img):
logger.logtext('ccoeff=%f' % coef)
consume_ap = coef > 0.9
- apimg = img.crop((100 * vw - 22.917 * vh, 2.917 * vh, 100 * vw, 8.194 * vh)).convert('L')
+ apimg = img.crop((100 * vw - 21.019 * vh, 2.917 * vh, 100 * vw, 8.194 * vh)).convert('L')
reco_Noto, reco_Novecento = load_data()
apimg = imgops.enhance_contrast(apimg, 80, 255)
logger.logimage(apimg)
@@ -47,6 +47,9 @@ def recognize(img):
opidtext = opidtext[:-1]
opidtext = opidtext.upper()
logger.logtext(opidtext)
+ if opidtext[0] == '0':
+ opidtext = 'O' + opidtext[1:]
+ logger.logtext('fixed to ' + opidtext)
# print('operation:', opidtext)
delegateimg = img.crop((100 * vw - 32.778 * vh, 79.444 * vh, 100 * vw - 4.861 * vh, 85.417 * vh)).convert('L')
@@ -57,7 +60,7 @@ def recognize(img):
delegated = mse < 400
# print('delegated:', delegated)
- consumeimg = img.crop((100 * vw - 14.306 * vh, 94.028 * vh, 100 * vw - 7.222 * vh, 97.361 * vh)).convert('L')
+ consumeimg = img.crop((100 * vw - 12.870 * vh, 94.028 * vh, 100 * vw - 7.222 * vh, 97.361 * vh)).convert('L')
consumeimg = imgops.enhance_contrast(consumeimg, 80, 255)
logger.logimage(consumeimg)
consumetext, minscore = reco_Noto.recognize2(consumeimg, subset='-0123456789')
|
mypy fix
Summary: Fix race condition Set -> AbstractSet for resource events
Test Plan: mypy
Reviewers: alangenfeld, sandyryza | import logging
import os
from enum import Enum
-from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Set, Union, cast
+from typing import TYPE_CHECKING, AbstractSet, Any, Dict, List, NamedTuple, Optional, Union, cast
from dagster import check
from dagster.core.definitions import (
@@ -776,7 +776,7 @@ def resource_init_start(
pipeline_name: str,
execution_plan: "ExecutionPlan",
log_manager: DagsterLogManager,
- resource_keys: Set[str],
+ resource_keys: AbstractSet[str],
) -> "DagsterEvent":
return DagsterEvent.from_resource(
@@ -826,7 +826,7 @@ def resource_init_failure(
pipeline_name: str,
execution_plan: "ExecutionPlan",
log_manager: DagsterLogManager,
- resource_keys: Set[str],
+ resource_keys: AbstractSet[str],
error: SerializableErrorInfo,
) -> "DagsterEvent":
@@ -847,7 +847,7 @@ def resource_teardown_failure(
pipeline_name: str,
execution_plan: "ExecutionPlan",
log_manager: DagsterLogManager,
- resource_keys: Set[str],
+ resource_keys: AbstractSet[str],
error: SerializableErrorInfo,
) -> "DagsterEvent":
|
Mark ParamType.fail() as NoReturn
This function just raises a click.BadParameter exception with the supplied arguments. | @@ -351,7 +351,7 @@ class _ParamType:
def split_envvar_value(self, rv: str) -> List[str]:
...
- def fail(self, message: str, param: Optional[Parameter] = ..., ctx: Optional[Context] = ...) -> None:
+ def fail(self, message: str, param: Optional[Parameter] = ..., ctx: Optional[Context] = ...) -> NoReturn:
...
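Why the annotation helps callers, as a small self-contained sketch rather than click's actual code: the type checker treats everything after a `NoReturn` call as unreachable, so a branch that ends in `fail()` needs no return of its own.

```python
from typing import NoReturn, Optional

def fail(message: str) -> NoReturn:
    raise ValueError(message)        # stand-in for raising click.BadParameter

def convert(value: Optional[str]) -> str:
    if value is None:
        fail("missing value")        # checker knows this never returns
    return value                     # value is narrowed to str here

print(convert("ok"))
```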
|
simplify _int_or_vec helper function
This patch reduces the number of arguments of the _int_or_vec helper function
by providing f as a bound method. | @@ -3405,9 +3405,9 @@ def dotarg(__argname: str, *arrays: IntoArray, shape: Tuple[int, ...] = (), dtyp
# BASES
-def _int_or_vec(f, self, arg, argname, nargs, nvals):
+def _int_or_vec(f, arg, argname, nargs, nvals):
if isinstance(arg, numbers.Integral):
- return f(self, int(numeric.normdim(nargs, arg)))
+ return f(int(numeric.normdim(nargs, arg)))
if numeric.isboolarray(arg):
if arg.shape != (nargs,):
raise IndexError('{} has invalid shape'.format(argname))
@@ -3422,7 +3422,7 @@ def _int_or_vec(f, self, arg, argname, nargs, nvals):
raise IndexError('{} out of bounds'.format(argname))
mask = numpy.zeros(nvals, dtype=bool)
for d in arg:
- mask[numpy.asarray(f(self, d))] = True
+ mask[numpy.asarray(f(d))] = True
return mask.nonzero()[0]
raise IndexError('invalid {}'.format(argname))
@@ -3430,14 +3430,14 @@ def _int_or_vec(f, self, arg, argname, nargs, nvals):
def _int_or_vec_dof(f):
@functools.wraps(f)
def wrapped(self, dof: Union[numbers.Integral, numpy.ndarray]) -> numpy.ndarray:
- return _int_or_vec(f, self, arg=dof, argname='dof', nargs=self.ndofs, nvals=self.nelems)
+ return _int_or_vec(f.__get__(self), arg=dof, argname='dof', nargs=self.ndofs, nvals=self.nelems)
return wrapped
def _int_or_vec_ielem(f):
@functools.wraps(f)
def wrapped(self, ielem: Union[numbers.Integral, numpy.ndarray]) -> numpy.ndarray:
- return _int_or_vec(f, self, arg=ielem, argname='ielem', nargs=self.nelems, nvals=self.ndofs)
+ return _int_or_vec(f.__get__(self), arg=ielem, argname='ielem', nargs=self.nelems, nvals=self.ndofs)
return wrapped
|
Add some metrics for pghoard base backup
backup duration
backup completed
backup failed | @@ -145,6 +145,7 @@ class PGBaseBackup(Thread):
def run(self):
try:
basebackup_mode = self.site_config["basebackup_mode"]
+ start_time = time.monotonic()
if basebackup_mode == BaseBackupMode.basic:
self.run_basic_basebackup()
elif basebackup_mode == BaseBackupMode.local_tar:
@@ -159,6 +160,7 @@ class PGBaseBackup(Thread):
raise errors.InvalidConfigurationError("Unsupported basebackup_mode {!r}".format(basebackup_mode))
except Exception as ex: # pylint: disable=broad-except
+ self.metrics.increase("pghoard.basebackup_failed")
if isinstance(ex, (BackupFailure, errors.InvalidConfigurationError)):
self.log.error(str(ex))
else:
@@ -169,6 +171,11 @@ class PGBaseBackup(Thread):
# post a failure event
self.callback_queue.put(CallbackEvent(success=False, exception=ex))
+ else:
+ backup_time = time.monotonic() - start_time
+ self.metrics.gauge("pghoard.backup_time", backup_time, tag={"basebackup_mode": basebackup_mode})
+ self.metrics.increase("pghoard.basebackup_completed")
+
finally:
self.running = False
@@ -927,11 +934,6 @@ class PGBaseBackup(Thread):
backup_stopped = True
backup_time = time.monotonic() - start_time
- self.metrics.gauge(
- "pghoard.backup_time_{}".format(self.site_config["basebackup_mode"]),
- backup_time,
- )
-
self.log.info(
"Basebackup generation finished, %r files, %r chunks, "
"%r byte input, %r byte output, took %r seconds, waiting to upload", total_file_count, chunks_count,
|
Unify OpenMP calibration code
Remove the duplication, and simplify the branching logic so it's easier
to read.
Also OpenMP is written like that, not as OPENMP in all-caps. | @@ -65,10 +65,10 @@ else:
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
import platform
-from .utilities import _blas_info
+from qutip.utilities import _blas_info
qutip.settings.eigh_unsafe = (_blas_info() == "OPENBLAS" and
platform.system() == 'Darwin')
-del platform
+del platform, _blas_info
# -----------------------------------------------------------------------------
# setup the cython environment
#
@@ -213,24 +213,24 @@ from qutip.cite import *
import qutip.configrc
has_rc, rc_file = qutip.configrc.has_qutip_rc()
-# Make qutiprc and benchmark OPENMP if has_rc = False
-if qutip.settings.has_openmp and (not has_rc):
+# Read the OpenMP threshold out if it already exists, or calibrate and save it
+# if it doesn't.
+if qutip.settings.has_openmp:
+ _calibrate_openmp = qutip.settings.num_cpus > 1
+ if has_rc:
+ _calibrate_openmp = (
+ _calibrate_openmp
+ and not qutip.configrc.has_rc_key('openmp_thresh', rc_file=rc_file)
+ )
+ else:
qutip.configrc.generate_qutiprc()
has_rc, rc_file = qutip.configrc.has_qutip_rc()
- if has_rc and qutip.settings.num_cpus > 1:
- from qutip.cy.openmp.bench_openmp import calculate_openmp_thresh
- #bench OPENMP
- print('Calibrating OPENMP threshold...')
- thrsh = calculate_openmp_thresh()
- qutip.configrc.write_rc_key('openmp_thresh', thrsh, rc_file=rc_file)
-# Make OPENMP if has_rc but 'openmp_thresh' not in keys
-elif qutip.settings.has_openmp and has_rc:
- has_omp_key = qutip.configrc.has_rc_key(rc_file, 'openmp_thresh')
- if not has_omp_key and qutip.settings.num_cpus > 1:
+ if _calibrate_openmp:
+ print('Calibrating OpenMP threshold...')
from qutip.cy.openmp.bench_openmp import calculate_openmp_thresh
- print('Calibrating OPENMP threshold...')
- thrsh = calculate_openmp_thresh()
- qutip.configrc.write_rc_key('openmp_thresh', thrsh, rc_file=rc_file)
+ thresh = calculate_openmp_thresh()
+ qutip.configrc.write_rc_key('openmp_thresh', thresh, rc_file=rc_file)
+ del calculate_openmp_thresh
# Load the config file
if has_rc:
|
bumped vault version to latest;
git commit -m | FROM alpine
-ENV VAULT_VERSION 0.6.2
+ENV VAULT_VERSION 0.7.0
ADD https://releases.hashicorp.com/vault/${VAULT_VERSION}/vault_${VAULT_VERSION}_linux_amd64.zip vault.zip
RUN apk add --update unzip openssl ca-certificates curl jq && \
|
Add time_line_operator to notifications, enable printing of CPE estimates in stderr log
Summary: Add time_line_operator to notifications, enable printing of IPS and Direct scores in stderr log | @@ -267,6 +267,9 @@ class DoublyRobustEstimator:
)
direct_method_score = float(torch.mean(direct_method_values))
+ logger.info(
+ f"Normalized Direct method score = {direct_method_score * normalizer}"
+ )
direct_method_std_error = bootstrapped_std_error_of_mean(
direct_method_values.squeeze(),
sample_percent=hp.bootstrap_sample_percent,
@@ -289,6 +292,8 @@ class DoublyRobustEstimator:
# policy
ips_score = float(torch.mean(ips))
+ logger.info(f"Normalized IPS score = {ips_score * normalizer}")
+
ips_score_std_error = bootstrapped_std_error_of_mean(
ips.squeeze(),
sample_percent=hp.bootstrap_sample_percent,
|
handle scenario optimizing in proforma/views.py
was returning incomplete proforma if scenario was still optimizing | @@ -55,6 +55,9 @@ def proforma(request, run_uuid):
try:
scenario = ScenarioModel.objects.get(run_uuid=run_uuid)
+ if scenario.status.lower() == "optimizing...":
+ return HttpResponse("Problem is still solving. Please try again later.", status=425) # too early status
+
try: # see if Proforma already created
pf = ProForma.objects.get(scenariomodel=scenario)
except:
@@ -75,6 +78,9 @@ def proforma(request, run_uuid):
if type(e).__name__ == 'DoesNotExist':
msg = "Scenario {} does not exist.".format(run_uuid)
return HttpResponse(msg, status=404)
+ elif "Problem is still solving" in e.args:
+ msg = e.args[0]
+ return HttpResponse(msg, status=425) # too early status
else:
exc_type, exc_value, exc_traceback = sys.exc_info()
err = UnexpectedError(exc_type, exc_value, exc_traceback, task='proforma', run_uuid=run_uuid)
|
feat(stock_zh_index_value_csindex): add stock_zh_index_value_csindex interface
add stock_zh_index_value_csindex interface | @@ -376,7 +376,7 @@ def stock_zh_a_minute(
if __name__ == "__main__":
- stock_zh_a_daily_hfq_df_one = stock_zh_a_daily(symbol="sh603843", start_date="20171103", end_date="20210908", adjust="")
+ stock_zh_a_daily_hfq_df_one = stock_zh_a_daily(symbol="sh600308", start_date="20171103", end_date="20211021", adjust="qfq")
print(stock_zh_a_daily_hfq_df_one)
stock_zh_a_daily_hfq_df_three = stock_zh_a_daily(symbol="sz000001", start_date="19900103", end_date="20210118", adjust="qfq")
|
Reduce priority for max gpu test
Reduce priority of max gpu provision test to 3 | @@ -110,10 +110,10 @@ class GpuTestSuite(TestSuite):
""",
timeout=TIMEOUT,
- # min_gpu_count is 8 since it is currently the max GPU count supported
- # in Azure, 'Standard_ND96asr_v4'
+        # min_gpu_count is 8 since it is currently the
+ # max GPU count available in Azure
requirement=simple_requirement(min_gpu_count=8),
- priority=1,
+ priority=3,
)
def verify_max_gpu_provision(self, node: Node, log: Logger) -> None:
start_stop = node.features[StartStop]
|
config_util improvement
* Minor improvement to config_util
1. Environment variable for not using gin. gin's wrapper is very complicated, which can make debugging unfriendly and slow down the execution.
2. Error report for misuse of alf.config()
* comment
* Fix message | @@ -83,6 +83,8 @@ def config(prefix_or_dict, mutable=True, raise_if_used=True, **kwargs):
**kwargs: only used if ``prefix_or_dict`` is a str.
"""
if isinstance(prefix_or_dict, str):
+ assert len(kwargs) > 0, ("**kwargs should be provided when "
+ "'prefix_or_dict' is a str")
prefix = prefix_or_dict
configs = dict([(prefix + '.' + k, v) for k, v in kwargs.items()])
elif isinstance(prefix_or_dict, dict):
@@ -579,7 +581,8 @@ def _decorate(fn_or_cls, name, whitelist, blacklist):
else:
fn_or_cls = _make_wrapper(fn_or_cls, configs, signature, has_self=0)
- if fn_or_cls.__module__ != '<run_path>':
+ if fn_or_cls.__module__ != '<run_path>' and os.environ.get(
+ 'ALF_USE_GIN', "1") == "1":
# If a file is executed using runpy.run_path(), the module name is
# '<run_path>', which is not an acceptable name by gin.
return gin.configurable(
@@ -667,7 +670,9 @@ def configurable(fn_or_name=None, whitelist=[], blacklist=[]):
Note: currently, to maintain the compatibility with gin-config, all the
functions decorated using alf.configurable are automatically configurable
using gin. The values specified using ``alf.config()`` will override
- values specified through gin.
+ values specified through gin. Gin wrapper is quite convoluted and can make
+ debugging more challenging. It can be disabled by setting environment
+    variable ALF_USE_GIN to 0 if you are not using gin.
Args:
fn_or_name (Callable|str): A name for this configurable, or a function
|
Stop trying to use pytest-xdist when using pytest-cov
It works fine for me on my Mac, but it doesn't seem to work right on either TravisCI or AppVeyor.
It appears to calculate the code coverage incorrectly when using pytest-xdist. | @@ -12,11 +12,9 @@ deps =
pyperclip
pytest
pytest-cov
- pytest-xdist
six
commands =
- py.test -v -n2 --cov=cmd2 --basetemp={envtmpdir} {posargs}
- {envpython} examples/example.py --test examples/exampleSession.txt
+ py.test -v --cov=cmd2 --basetemp={envtmpdir} {posargs}
codecov
[testenv:py33]
@@ -57,11 +55,9 @@ deps =
pyperclip
pytest
pytest-cov
- pytest-xdist
six
commands =
- py.test -v -n2 --cov=cmd2 --basetemp={envtmpdir} {posargs}
- {envpython} examples/example.py --test examples/exampleSession.txt
+ py.test -v --cov=cmd2 --basetemp={envtmpdir} {posargs}
codecov
[testenv:py37]
|
Don't send empty dict on shutdown
Fixes | @@ -148,10 +148,6 @@ class Request:
r["jsonrpc"] = "2.0"
r["id"] = id
r["method"] = self.method
- if self.params is not None:
- r["params"] = self.params
- else:
- r["params"] = dict()
return r
|
Updated elmah-log-file.yaml
Updated redirect, description and reference | @@ -2,15 +2,24 @@ id: elmah-log-file
info:
name: elmah.axd Disclosure
- author: shine
+ author: shine, idealphase
severity: medium
+ description: |
+ ELMAH (Error Logging Modules and Handlers) is an application-wide error logging facility that is completely pluggable. It can be dynamically added to a running ASP.NET web application, or even all ASP.NET web applications on a machine, without any need for re-compilation or re-deployment.
+ reference:
+ - https://code.google.com/archive/p/elmah/
+ - https://www.troyhunt.com/aspnet-session-hijacking-with-google/
tags: logs,exposure
requests:
- method: GET
path:
- "{{BaseURL}}/elmah.axd"
+ - "{{BaseURL}}/elmah"
+ stop-at-first-match: true
+ host-redirects: true
+ max-redirects: 2
matchers-condition: and
matchers:
|
[Holidays] Check for future month of current year
Advanced error handling | @@ -37,7 +37,9 @@ class Holidays:
"Use {}holidays setkey to set API key.".format(ctx.prefix)))
return
if not self.config["premiumkey"] \
- and month == int(datetime.now().strftime('%m')) or year > int(datetime.now().strftime('%Y')):
+ and month == int(datetime.now().strftime('%m')) or \
+ year > int(datetime.now().strftime('%Y')) or \
+ (year >= int(datetime.now().strftime('%Y')) and month > int(datetime.now().strftime('%m'))):
year = int(datetime.now().strftime('%Y')) - 1
await self.bot.say(chat.warning("This bot has set non-premium key, "
"so current and upcoming holiday data is unavailable. "
@@ -58,14 +60,11 @@ class Holidays:
"public": "Public"},
tablefmt="orgtbl")):
await self.bot.say(chat.box(page))
- elif response["status"] == 400:
+ elif response["status"] < 500:
await self.bot.say(chat.error("Something went wrong... "
"Server returned client error code {}. "
"This is possibly cog error.\n"
"{}").format(response["status"], chat.inline(response["error"])))
- elif response["status"] < 500:
- await self.bot.say(chat.error("Something went wrong... "
- "Server returned client error code {}.").format(response["status"]))
else:
await self.bot.say(chat.error("Something went wrong... Server returned server error code {}. "
"Try again later.").format(response["status"]))
|
Implemented some more commands
But I gotta figure out how to vectorise them without explicitly vectorising them | @@ -278,6 +278,9 @@ class Stack(list):
def __mult__(self, rhs):
return self.contents * rhs
+ def __iter__(self):
+ return iter(self.contents)
+
def do_map(self, fn):
temp = []
obj = self.pop()
@@ -400,8 +403,8 @@ def orderless_range(a, b, lift_factor=0):
def summate(item):
x = as_iter(item)
result = 0
- for _ in x:
- result = add(result, _)
+ for v in x:
+ result = add(result, v)
return result
|
chore: update readme slogan
Making coherence for brand slogan | <img src="https://github.com/jina-ai/jina/blob/master/.github/logo-only.gif?raw=true" alt="Jina banner" width="200px">
</p>
<p align="center">
-The easiest way to build neural search on the cloud
+An easier way to build neural search in the cloud
</p>
<br>
|
Update new password prompt
No longer prompts for new password if --new_password parameter is used | @@ -166,7 +166,7 @@ def init_cmdline(config_options, wallet_path, server, *, config: 'SimpleConfig')
config_options['password'] = config_options.get('password') or password
- if cmd.name == 'password':
+ if cmd.name == 'password' and 'new_password' not in config_options:
new_password = prompt_password('New password:')
config_options['new_password'] = new_password
|
[batch] always use throttler to delete pods
Without this fix, we almost never release the semaphore | @@ -670,7 +670,7 @@ class Job:
self._state = new_state
- await self._delete_pod()
+ await app['pod_throttler'].delete_pod(self)
await self._delete_pvc()
await self.notify_children(new_state)
|
Change submodules to use https instead of git
* This should be easier to get through proxies and work for
more people according to Github. | [submodule "tests/CPython26"]
path = tests/CPython26
- url = [email protected]:Nuitka/Nuitka-CPython-tests
+ url = https://github.com/Nuitka/Nuitka-CPython-tests.git
branch = CPython26
[submodule "tests/CPython27"]
path = tests/CPython27
- url = [email protected]:Nuitka/Nuitka-CPython-tests
+ url = https://github.com/Nuitka/Nuitka-CPython-tests.git
branch = CPython27
[submodule "tests/CPython32"]
path = tests/CPython32
- url = [email protected]:Nuitka/Nuitka-CPython-tests
+ url = https://github.com/Nuitka/Nuitka-CPython-tests.git
branch = CPython32
[submodule "tests/CPython33"]
path = tests/CPython33
- url = [email protected]:Nuitka/Nuitka-CPython-tests
+ url = https://github.com/Nuitka/Nuitka-CPython-tests.git
branch = CPython33
[submodule "tests/CPython34"]
path = tests/CPython34
- url = [email protected]:Nuitka/Nuitka-CPython-tests
+ url = https://github.com/Nuitka/Nuitka-CPython-tests.git
branch = CPython34
[submodule "tests/CPython35"]
path = tests/CPython35
- url = [email protected]:Nuitka/Nuitka-CPython-tests
+ url = https://github.com/Nuitka/Nuitka-CPython-tests.git
branch = CPython35
[submodule "tests/CPython36"]
path = tests/CPython36
- url = [email protected]:Nuitka/Nuitka-CPython-tests
+ url = https://github.com/Nuitka/Nuitka-CPython-tests.git
branch = CPython36
[submodule "tests/CPython37"]
path = tests/CPython37
- url = [email protected]:Nuitka/Nuitka-CPython-tests
+ url = https://github.com/Nuitka/Nuitka-CPython-tests.git
branch = CPython37
[submodule "tests/CPython38"]
path = tests/CPython38
- url = [email protected]:Nuitka/Nuitka-CPython-tests
+ url = https://github.com/Nuitka/Nuitka-CPython-tests.git
branch = CPython38
|
Fixed typo in vm.args example
I think in the vm.args example the line with `-kernel inet_dist_listen_max 9100` should in fact be `-kernel inet_dist_listen_min 9100` | @@ -127,7 +127,7 @@ and ``-kernel inet_dist_listen_max 9200`` like below:
-name ...
-setcookie ...
...
- -kernel inet_dist_listen_max 9100
+ -kernel inet_dist_listen_min 9100
-kernel inet_dist_listen_max 9200
.. _cluster/setup/wizard:
|
Run pyright as part of the CI process
This doesn't do verifytypes yet due to a bug in Pyright | @@ -41,9 +41,7 @@ jobs:
- name: Run pyright
run: |
- # It is OK for the types to not pass at this stage
- # We are just running it as a quick reference check
- pyright || echo "Type checking did not pass"
+ pyright
- name: Run black
if: ${{ always() && steps.install-deps.outcome == 'success' }}
|
Fixed patch version
This should have been bumped _before_ 0.60.12.1 was tagged. Not sure if we want to force re-tag or release 0.60.12.2 (making this commit a bit silly) | @@ -50,7 +50,7 @@ import distutils.dir_util
gafferMilestoneVersion = 0 # for announcing major milestones - may contain all of the below
gafferMajorVersion = 60 # backwards-incompatible changes
gafferMinorVersion = 12 # new backwards-compatible features
-gafferPatchVersion = 0 # bug fixes
+gafferPatchVersion = 1 # bug fixes
# All of the following must be considered when determining
# whether or not a change is backwards-compatible
|
Fix misnamed variable in view_file template
h/t | userId: ${ user['id'] | sjson, n },
userName: ${ user['fullname'] | sjson, n },
userUrl: ${ ('/' + user['id'] + '/') if user['id'] else None | sjson, n },
- userProfileImage: ${ urls['profile_image_url'].replace('&', '&') | sjson, n }
+ userProfileImage: ${ urls['profile_image'].replace('&', '&') | sjson, n }
},
node: {
urls: {
|
test: xfailed TaskExecutable termination. Bug
needs to be fixed. | @@ -254,7 +254,7 @@ def test_task_depend_success(tmpdir, cls, expected):
],
"2020-01-01 07:30",
False,
- id="Don't run (terminated)"),
+ id="Don't run (terminated)", marks=pytest.mark.xfail(reason="BUG: termination should correspond to failure")),
pytest.param(
# Termination is kind of failing but retry is not applicable as termination is often
@@ -267,7 +267,7 @@ def test_task_depend_success(tmpdir, cls, expected):
],
"2020-01-01 07:30",
False,
- id="Don't run (terminated with retries)"),
+ id="Don't run (terminated with retries)", marks=pytest.mark.xfail(reason="BUG: termination should correspond to failure")),
pytest.param(
lambda:TaskExecutable(task="the task", period=TimeOfDay("07:00", "08:00")),
|
Fix virt.pool_running state documentation
virt.pool_running needs the source to be a dictionary, which the
documentation was not reflecting. Along the same lines the source hosts
need to be a list, adjust the example to show it. | @@ -744,11 +744,11 @@ def pool_running(name,
- owner: 1000
- group: 100
- source:
- - dir: samba_share
- - hosts:
- one.example.com
- two.example.com
- - format: cifs
+ dir: samba_share
+ hosts:
+ - one.example.com
+ - two.example.com
+ format: cifs
- autostart: True
'''
|
Remove NVCR.io workaround
Now that we have knative 0.7 the workaround is no longer needed! | 2. Your cluster's Istio Ingress gateway must be network accessible.
3. Your cluster's Istio Egresss gateway must [allow Google Cloud Storage](https://knative.dev/docs/serving/outbound-network-access/)
-
-> Knative is not able to resolve containers from the NVIDIA container registry.
-To work around this you need to skip resolution for nvcr.io by editing knative config-deployment.yaml. See details here: https://github.com/knative/serving/issues/4355
## Create the KFService
Apply the CRD
```
|
Exposing NeutronDhcpOvsIntegrationBridge
Using this, users can set the already available
ovs_integration_bridge parameter in dhcp_agent.ini | @@ -84,10 +84,15 @@ parameters:
type: string
description: Specifies the default CA cert to use if TLS is used for
services in the internal network.
+ NeutronDhcpOvsIntegrationBridge:
+ default: ''
+ type: string
+ description: Name of Open vSwitch bridge to use
conditions:
service_debug_unset: {equals: [{get_param: NeutronDhcpAgentDebug}, '']}
internal_tls_enabled: {equals: [{get_param: EnableInternalTLS}, true]}
+ dhcp_ovs_intergation_bridge_unset: {equals: [{get_param: NeutronDhcpOvsIntegrationBridge}, '']}
resources:
@@ -135,6 +140,10 @@ outputs:
neutron::agents::dhcp::ovsdb_agent_ssl_cert_file: '/etc/pki/tls/certs/neutron.crt'
neutron::agents::dhcp::ovsdb_agent_ssl_ca_file: {get_param: InternalTLSCAFile}
- {}
+ - if:
+ - dhcp_ovs_intergation_bridge_unset
+ - {}
+ - neutron::agents::dhcp::ovs_integration_bridge: {get_param: NeutronDhcpOvsIntegrationBridge}
service_config_settings:
fluentd:
tripleo_fluentd_groups_neutron_dhcp:
|
Fix language for sphinx 5.0
As per | @@ -93,7 +93,7 @@ release = bluesky.__version__
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
-language = None
+language = "en"
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
|
Fix line endings on Windows machines
On windows machines the default line endings are \r\n instead of \n. This causes code to fail in the filterPair function. Fix details here | @@ -250,7 +250,7 @@ conversations = loadConversations(os.path.join(corpus, "movie_conversations.txt"
# Write new csv file
print("\nWriting newly formatted file...")
with open(datafile, 'w', encoding='utf-8') as outputfile:
- writer = csv.writer(outputfile, delimiter=delimiter)
+ writer = csv.writer(outputfile, delimiter=delimiter, lineterminator='\n')
for pair in extractSentencePairs(conversations):
writer.writerow(pair)
|
Update Tezos sources
Problem: yet another Tezos version was released.
We need to bump the revisions used.
Solution: updated versions for Tezos sources. | "url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
},
"tezos": {
- "ref": "refs/tags/v9.6",
+ "ref": "refs/tags/v9.7",
"repo": "https://gitlab.com/tezos/tezos",
- "rev": "af8891d9a11fe9e96dd4e0d6a54f095ca4d6f23b",
+ "rev": "853629c2c4db0003b073cce0dddbfb69e73d3275",
"type": "git"
}
}
|
Updated time widget to not show today icon
This doesn't do anything for time-only widgets. | @@ -761,7 +761,7 @@ hqDefine("cloudcare/js/form_entry/entries", function () {
var d = moment(date, self.clientFormat);
return d.isValid() ? d : null;
},
- }));
+ }, self.extraOptions));
self.$picker.on("dp.change", function (e) {
if (!e.date) {
self.answer(Const.NO_ANSWER);
@@ -785,6 +785,9 @@ hqDefine("cloudcare/js/form_entry/entries", function () {
// Formatting string should be in moment format: https://momentjs.com/docs/#/displaying/format/
DateTimeEntryBase.prototype.serverFormat = undefined;
+ // Extra options to pass to datetimepicker widget
+ DateTimeEntryBase.prototype.extraOptions = {};
+
function DateEntry(question, options) {
this.templateType = 'date';
DateTimeEntryBase.call(this, question, options);
@@ -804,6 +807,7 @@ hqDefine("cloudcare/js/form_entry/entries", function () {
TimeEntry.prototype.clientFormat = 'HH:mm';
TimeEntry.prototype.serverFormat = 'HH:mm';
+ TimeEntry.prototype.extraOptions = {showTodayButton: false};
function EthiopianDateEntry(question, options) {
var self = this,
|
adapting caffe2 operator docs generator to pytorch url
Summary: Pull Request resolved: | @@ -54,8 +54,8 @@ class GHMarkdown(Markdown):
def getCodeLink(formatter, schema):
formatter = formatter.clone()
- path = os.path.join("caffe2", os.path.relpath(schema.file, "caffe2"))
- schemaLink = ('https://github.com/caffe2/caffe2/blob/master/{path}'
+ path = os.path.relpath(schema.file, "caffe2")
+ schemaLink = ('https://github.com/pytorch/pytorch/blob/master/{path}'
.format(path=path))
formatter.addLink('{path}'.format(path=path), schemaLink)
return formatter.dump()
|
linopt: For MILP unit commitment GLPK should return "optimal"
Not "integer optimal". This is consistent with pyomo output. | @@ -623,8 +623,9 @@ def run_and_read_glpk(n, problem_fn, solution_fn, solver_logfile,
termination_condition = info.Status.lower().strip()
objective = float(re.sub('[^0-9\.\+\-]+', '', info.Objective))
- if termination_condition == "optimal":
+ if termination_condition in ["optimal","integer optimal"]:
status = "ok"
+ termination_condition = "optimal"
elif termination_condition == "undefined":
status = "warning"
termination_condition = "infeasible"
|
Opt out of nextjs telemetry
Fixes | @@ -145,6 +145,8 @@ FETCH_FEES = false
DISABLE_LINKS = true
DISABLE_LNMARKETS = true
NO_VERSION_CHECK = true
+# https://nextjs.org/telemetry#how-do-i-opt-out
+NEXT_TELEMETRY_DISABLED=1
# -----------
# Account Configs
|
[Concat] fix rtlsim by doing no order reversal during packing
this may be a problem in the future | @@ -132,16 +132,12 @@ class Concat(HLSCustomOp):
idt = self.get_input_datatype()
total_elems = self.get_total_elems()
total_bw = idt.bitwidth() * total_elems
- lbit = 0
- hbit = total_bw - 1
for (i, elems) in enumerate(elems_per_stream):
bw = idt.bitwidth() * elems
- lbit = hbit - bw + 1
inp_stream = "hls::stream<ap_uint<%d> > &in%d" % (bw, i)
inp_streams.append(inp_stream)
- cmd = "out_elem(%d,%d) = in%d.read();" % (hbit, lbit, i)
+ cmd = "in%d.read()" % i
commands.append(cmd)
- hbit = lbit - 1
out_stream = "hls::stream<ap_uint<%d> > &out" % (total_bw)
inp_streams.append(out_stream)
@@ -152,7 +148,7 @@ class Concat(HLSCustomOp):
impl_hls_code.append("for(unsigned int i = 0; i < numReps; i++) {")
impl_hls_code.append("#pragma HLS PIPELINE II=1")
impl_hls_code.append("ap_uint<%d> out_elem;" % total_bw)
- impl_hls_code.append("\n".join(commands))
+ impl_hls_code.append("out_elem = (" + ",".join(commands) + ");")
impl_hls_code.append("out.write(out_elem);")
impl_hls_code.append("}")
impl_hls_code.append("}")
@@ -210,7 +206,10 @@ class Concat(HLSCustomOp):
for i in range(n_inps):
nbits = self.get_instream_width(i)
rtlsim_inp = npy_to_rtlsim_input(
- "%s/input_%d.npy" % (code_gen_dir, i), export_idt, nbits
+ "%s/input_%d.npy" % (code_gen_dir, i),
+ export_idt,
+ nbits,
+ reverse_inner=False,
)
io_dict["inputs"]["in%d" % i] = rtlsim_inp
super().reset_rtlsim(sim)
@@ -224,7 +223,13 @@ class Concat(HLSCustomOp):
out_npy_path = "{}/output.npy".format(code_gen_dir)
out_shape = self.get_folded_output_shape()
rtlsim_output_to_npy(
- rtlsim_output, out_npy_path, odt, out_shape, packed_bits, target_bits
+ rtlsim_output,
+ out_npy_path,
+ odt,
+ out_shape,
+ packed_bits,
+ target_bits,
+ reverse_inner=False,
)
# load and reshape output
output = np.load(out_npy_path)
|
Update for ElGoonishShive and ElGoonishShiveNP
Strip no longer supports ID numbers after May 21 site revamp per Dan Shive. Code here switched to ComicControl. Tested and verified locally. (fixes | @@ -11,7 +11,7 @@ from re import compile, escape, IGNORECASE
from ..helpers import indirectStarter, xpath_class
from ..scraper import _BasicScraper, _ParserScraper
from ..util import tagre
-from .common import _WordPressScraper, _WPNavi, WP_LATEST_SEARCH
+from .common import _ComicControlScraper, _WordPressScraper, _WPNavi, WP_LATEST_SEARCH
class EarthsongSaga(_ParserScraper):
@@ -74,24 +74,12 @@ class ElfOnlyInn(_BasicScraper):
help = 'Index format: yyyymmdd'
-class ElGoonishShive(_BasicScraper):
+class ElGoonishShive(_ComicControlScraper):
url = 'http://www.egscomics.com/'
- stripUrl = url + 'index.php?id=%s'
- imageSearch = compile(tagre("img", "src", r'(comics/[^"]+)',
- after="comic"))
- prevSearch = compile(tagre("a", "href", r'(/index\.php\?id=\d+)',
- after="prev"))
- help = 'Index format: number'
-class ElGoonishShiveNP(_BasicScraper):
+class ElGoonishShiveNP(_ComicControlScraper):
url = 'http://www.egscomics.com/egsnp.php'
- stripUrl = url + '?id=%s'
- imageSearch = compile(tagre("img", "src", r'(comics/[^"]+)',
- after="comic"))
- prevSearch = compile(tagre("a", "href", r'(/egsnp\.php\?id=\d+)',
- after="prev"))
- help = 'Index format: number'
class EmergencyExit(_BasicScraper):
|
clarify $db/_security section
- point out that values are optional
- add curl example
- language fixes | functionality.
If both the names and roles fields of either the admins or members
- properties are empty arrays, it means the database has no admins or
- members.
+ properties are empty arrays, or are not existent, it means the database
+ has no admins or members.
Having no admins, only server admins (with the reserved ``_admin`` role)
are able to update design document and make other admin level changes.
**Request**:
+ .. code-block:: bash
+
+ shell> curl http://localhost:5984/pineapple/_security -X PUT -H 'content-type: application/json' -H 'accept: application/json' -d '{"admins":{"names":["superuser"],"roles":["admins"]},"members":{"names": ["user1","user2"],"roles": ["developers"]}}'
+
.. code-block:: http
PUT /db/_security HTTP/1.1
|
Updates for SSO rebranding
Updates to the `credentials.rst` file to incorporate changes required by the SSO rebranding campaign.
Also updated the build settings to use 2022 as the copyright year. | @@ -51,7 +51,7 @@ master_doc = 'index'
# General information about the project.
project = 'Boto3 Docs'
-copyright = '2021, Amazon Web Services, Inc'
+copyright = '2022, Amazon Web Services, Inc'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
|
str -> StringSource for azure compute log manager config
Summary: As the title.
Test Plan: Existing tests
Reviewers: alangenfeld | import os
from contextlib import contextmanager
-from dagster import Field, check, seven
+from dagster import Field, StringSource, check, seven
from dagster.core.storage.compute_log_manager import (
MAX_BYTES_FILE_READ,
ComputeIOType,
@@ -88,11 +88,11 @@ def inst_data(self):
@classmethod
def config_type(cls):
return {
- "storage_account": str,
- "container": str,
- "secret_key": str,
- "local_dir": Field(str, is_required=False),
- "prefix": Field(str, is_required=False, default_value="dagster"),
+ "storage_account": StringSource,
+ "container": StringSource,
+ "secret_key": StringSource,
+ "local_dir": Field(StringSource, is_required=False),
+ "prefix": Field(StringSource, is_required=False, default_value="dagster"),
}
@staticmethod
|
fs: ssh: pass timeout to paramiko's connect()
Prior to this commit, DVC could hang indefinitely when connecting to
remote SSH filesystems. This change should address that. | @@ -40,7 +40,9 @@ class SSHConnection:
host=host, **kwargs
)
)
- self.timeout = kwargs.get("timeout", 1800)
+
+ kwargs.setdefault("timeout", 1800)
+ self.timeout = kwargs["timeout"]
self._ssh = paramiko.SSHClient()
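For context, a hedged sketch of where that value ends up (host, username, and the 1800-second default are placeholders, not DVC code): paramiko's SSHClient.connect() accepts a timeout argument that bounds the TCP connection attempt instead of letting it hang.
```python
import paramiko

# Sketch only: connect() with timeout= fails fast if the remote host never
# completes the TCP handshake, rather than blocking indefinitely.
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect("ssh.example.com", port=22, username="dvc-user", timeout=1800)
client.close()
```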
|
[MNT] temp MR to remove 2977 merge conflicts - part 3
Reverts sktime/sktime#4206, see there for explanation of the strategy to resolve the merge conflict with | "maintenance"
]
},
+ {
+ "login": "pranavvp16",
+ "name": "Pranav Prajapati",
+ "avatar_url": "https://avatars.githubusercontent.com/u/94780581?v=4",
+ "profile": "https://www.linkedin.com/in/pranav-prajapati-a5b413226/",
+ "contributions": [
+ "code",
+ "test"
+ ]
+ },
+ {
+ "login": "romanlutz",
+ "name": "Roman Lutz",
+ "avatar_url": "https://avatars.githubusercontent.com/u/10245648?v=4",
+ "profile": "https://www.linkedin.com/in/romanlutz/",
+ "contributions":[
+ "doc"
+ ]
+ },
{
"login": "DBCerigo",
"name": "Daniel Burkhardt Cerigo",
|
Fix heatmap aug in ElasticTransformation
Fix for two errors in ElasticTransformation,
both affecting augmentation for heatmaps with
different sizes than the underlying images.
* Fix heatmap shape being read out incorrectly;
resulted in error.
* Fix generated heatmap arrays sometimes being
slightly below/above 0.0/1.0; resulted in
errors. | @@ -2046,7 +2046,7 @@ class ElasticTransformation(Augmenter):
# This may result in indices of moved pixels being different.
# To prevent this, we use the same image size as for the base images, but that
# requires resizing the heatmaps temporarily to the image sizes.
- height_orig, width_orig = heatmaps_i.arr_0to1
+ height_orig, width_orig = heatmaps_i.arr_0to1.shape[0:2]
heatmaps_i = heatmaps_i.scale(heatmaps_i.shape[0:2])
arr_0to1 = heatmaps_i.arr_0to1
(source_indices_x, source_indices_y), (_dx, _dy) = ElasticTransformation.generate_indices(
@@ -2063,6 +2063,11 @@ class ElasticTransformation(Augmenter):
cval=0,
mode="constant"
)
+
+ # interpolation in map_coordinates() can cause some values to be slightly
+ # below/above 1.0, so we clip here
+ arr_0to1_warped = np.clip(arr_0to1_warped, 0.0, 1.0, out=arr_0to1_warped)
+
heatmaps_i_warped = ia.HeatmapsOnImage.from_0to1(arr_0to1_warped, shape=heatmaps_i.shape, min_value=heatmaps_i.min_value, max_value=heatmaps_i.max_value)
heatmaps_i_warped = heatmaps_i_warped.scale((height_orig, width_orig))
heatmaps[i] = heatmaps_i_warped
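An isolated sketch of the clipping step added above (the array values are invented stand-ins for interpolation overshoot): np.clip with out= writes the result back into the same array, pulling stray values back into [0.0, 1.0] without an extra copy.
```python
import numpy as np

# Values slightly outside the valid range mimic map_coordinates() overshoot.
arr_0to1 = np.array([-0.0003, 0.25, 0.5, 1.0004], dtype=np.float32)
np.clip(arr_0to1, 0.0, 1.0, out=arr_0to1)  # in-place clip
assert arr_0to1.min() >= 0.0 and arr_0to1.max() <= 1.0
```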
|
Add sort order field from Customize Form
* Add sort order field from Customize Form
fixes support issue WN-SUP25048
* Handle case when meta_sort_field is undefined | @@ -91,16 +91,12 @@ frappe.ui.SortSelector = Class.extend({
var me = this;
var meta = frappe.get_meta(this.doctype);
+ var { meta_sort_field, meta_sort_order } = this.get_meta_sort_field();
+
if(!this.args.sort_by) {
- if(meta.sort_field) {
- if(meta.sort_field.indexOf(',')!==-1) {
- parts = meta.sort_field.split(',')[0].split(' ');
- this.args.sort_by = parts[0];
- this.args.sort_order = parts[1];
- } else {
- this.args.sort_by = meta.sort_field;
- this.args.sort_order = meta.sort_order.toLowerCase();
- }
+ if(meta_sort_field) {
+ this.args.sort_by = meta_sort_field;
+ this.args.sort_order = meta_sort_order;
} else {
// default
this.args.sort_by = 'modified';
@@ -115,7 +111,7 @@ frappe.ui.SortSelector = Class.extend({
if(!this.args.options) {
// default options
var _options = [
- {'fieldname': 'modified'},
+ {'fieldname': 'modified'}
]
// title field
@@ -130,9 +126,15 @@ frappe.ui.SortSelector = Class.extend({
}
});
- _options.push({'fieldname': 'name'});
- _options.push({'fieldname': 'creation'});
- _options.push({'fieldname': 'idx'});
+ // meta sort field
+ if(meta_sort_field) _options.push({ 'fieldname': meta_sort_field });
+
+ // more default options
+ _options.push(
+ {'fieldname': 'name'},
+ {'fieldname': 'creation'},
+ {'fieldname': 'idx'}
+ )
// de-duplicate
this.args.options = _options.uniqBy(function(obj) {
@@ -151,6 +153,21 @@ frappe.ui.SortSelector = Class.extend({
this.sort_by = this.args.sort_by;
this.sort_order = this.args.sort_order;
},
+ get_meta_sort_field: function() {
+ var meta = frappe.get_meta(this.doctype);
+ if(meta.sort_field && meta.sort_field.includes(',')) {
+ var parts = meta.sort_field.split(',')[0].split(' ');
+ return {
+ meta_sort_field: parts[0],
+ meta_sort_order: parts[1]
+ }
+ } else {
+ return {
+ meta_sort_field: meta.sort_field,
+ meta_sort_order: meta.sort_order.toLowerCase()
+ }
+ }
+ },
get_label: function(fieldname) {
if(fieldname==='idx') {
return __("Most Used");
|
Ignore skip-magic-trailing-comma formatting with git blame
This adds a formatting commit to ignore when doing git blame. | @@ -9,3 +9,9 @@ cb6940a40141dba95cba84f5acc27acbeb65b17c
# Format cirq-google with latest version of black (#5160)
77fb93af5ebbb5bcf7f8a4dc3fd2fba0e827488c
+
+# Format cirq-google with skip-magic-trailing-comma (#5171)
+01a7cb94a90482557ac26b705adbb379e139a7b2
+
+# Format according to new black rules (#5259)
+27152c63cc74d526e3adca721ac12e8842ac6fa5
|
Handle no common facade version
Ensure we handle issues where we can't find a common version of a
facade | @@ -651,9 +651,16 @@ class Connection:
# so in order to be compatible forwards and backwards we speak a
# common facade versions.
if name in client_facades:
+ try:
known = client_facades[name]['versions']
discovered = facade['versions']
version = max(set(known).intersection(set(discovered)))
+ except ValueError:
+ # this can occur if known is [1, 2] and discovered is [3, 4]
+ # there is just know way to know how to communicate with the
+ # facades we're trying to call.
+ raise errors.JujuConnectionError('unknown common facade version')
+ else:
self.facades[name] = version
async def login(self):
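A small self-contained illustration (version numbers made up) of why the try/except above is needed: max() over an empty intersection raises ValueError.
```python
# If the client knows facade versions {1, 2} but the controller offers {3, 4},
# the intersection is empty and max() raises ValueError.
known = {1, 2}
discovered = {3, 4}
try:
    version = max(set(known).intersection(set(discovered)))
except ValueError:
    print("no common facade version between", sorted(known), "and", sorted(discovered))
```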
|
[core/engine] handle single-module errors
instead of terminating the whole status bar when an error occurs, just
show a (truncated) error for that single widget.
this should also enable auto-recovery if the module returns to a "good"
state, but that hasn't been tested yet.
see and | @@ -38,6 +38,7 @@ class Module(object):
self.name = config.get("name", self.__module__.split(".")[-1])
self._config = config
self.id = self.name
+ self.error = None
self._next = int(time.time())
self._default_interval = 0
@@ -67,6 +68,12 @@ class Module(object):
if widget.name == name:
return widget
+ def errorWidget(self):
+ msg = self.error
+ if len(msg) > 10:
+ msg = "{}...".format(msg[0:7])
+ return bumblebee.output.Widget(full_text="error: {}".format(msg))
+
def widget_by_id(self, uid):
for widget in self._widgets:
if widget.id == uid:
@@ -80,7 +87,12 @@ class Module(object):
def update_wrapper(self, widgets):
if self._next > int(time.time()):
return
+ try:
+ self.error = None
self.update(self._widgets)
+ except Exception as e:
+ log.error("error updating '{}': {}".format(self.name, str(e)))
+ self.error = str(e)
self._next += int(self.parameter("interval", self._default_interval))*60
def interval(self, intvl):
@@ -244,9 +256,12 @@ class Engine(object):
for module in self._modules:
self._current_module = module
module.update_wrapper(module.widgets())
+ if module.error == None:
for widget in module.widgets():
widget.link_module(module)
self._output.draw(widget=widget, module=module, engine=self)
+ else:
+ self._output.draw(widget=module.errorWidget(), module=module, engine=self)
self._output.flush()
self._output.end()
if self.running():
|
ci: Remove unnecessary steps from production upgrade script.
This removes some steps that no longer need to be run
in the production upgrade script. The steps were used due to
errors related to supervisor failing to restart which was resolved
in the commit | @@ -11,13 +11,12 @@ set -x
# need to do some preparatory steps. It is a goal to delete these
# steps.
-# Reinstall rabbitmq-server and supervisor.
+# Reinstall rabbitmq-server.
#
# * For rabbitmq-server, we likely need to do this to work around the
# hostname changing on reboot causing RabbitMQ to not boot.
-# * For supervisor, we don't understand why it doesn't start properly.
-sudo apt-get -y purge rabbitmq-server supervisor
-sudo apt-get -y install rabbitmq-server supervisor
+sudo apt-get -y purge rabbitmq-server
+sudo apt-get -y install rabbitmq-server
# Start the postgresql service.
sudo service postgresql start
@@ -29,20 +28,8 @@ if ! sudo service rabbitmq-server start; then
sudo service rabbitmq-server start
fi
-# Apply puppet (still on the previous release the container was
-# installed with). This should leave us with a working copy of Zulip
-# running a previous release.
-sudo /home/zulip/deployments/current/scripts/zulip-puppet-apply -f
-
-# Stopping nginx service started by above command.
-#
-# This is a workaround for an unexpected `Unable to stop
-# Service[nginx]` error in the puppet apply step of upgrade otherwise.
-if ! sudo service nginx stop; then
- echo
- echo "Stoping nginx failed. Trying again:"
- sudo service nginx stop
-fi
+# Start the supervisor
+sudo service supervisor start
# Zulip releases before 2.1.8/3.5/4.4 have a bug in their
# `upgrade-zulip` scripts, resulting in them exiting with status 0
|
ceph-iscsi: don't use brackets with trusted_ip_list
The trusted_ip_list parameter for the rbd-target-api service doesn't
support IPv6 addresses wrapped in brackets.
Closes: | - name: add mgr ip address to trusted list with dashboard - ipv6
set_fact:
- trusted_ip_list: '{{ trusted_ip_list }},{{ hostvars[item]["ansible_all_ipv6_addresses"] | ips_in_ranges(public_network.split(",")) | last | ipwrap }}'
+ trusted_ip_list: '{{ trusted_ip_list }},{{ hostvars[item]["ansible_all_ipv6_addresses"] | ips_in_ranges(public_network.split(",")) | last }}'
with_items: '{{ groups[mgr_group_name] | default(groups[mon_group_name]) }}'
when:
- dashboard_enabled | bool
|
Add profile to ReportInterfaceFacts
HG--
branch : feature/microservices | @@ -103,6 +103,7 @@ class ReportFilterApplication(SimpleReport):
(
mo.name,
mo.address,
+ mo.profile_name,
iface
)
]
@@ -110,6 +111,6 @@ class ReportFilterApplication(SimpleReport):
return self.from_dataset(
title=self.title,
columns=[
- _("Managed Object"), _("Address"), _("Interface")
+ _("Managed Object"), _("Address"), _("SA Profile"), _("Interface")
],
data=data)
|
downstream-ci: include the ocs version in the `latest-stable` tag
Builds that pass the acceptance tests will now be tagged as
`latest-stable-$ocs_version` instead of just `latest-stable`. | @@ -105,10 +105,11 @@ pipeline {
def registry_image = "${env.OCS_REGISTRY_IMAGE}"
// quay.io/rhceph-dev/ocs-registry:4.2-58.e59ca0f.master -> 4.2-58.e59ca0f.master
def registry_tag = registry_image.split(':')[-1]
- // tag ocs-registry container as 'latest-stable'
- build job: 'quay-tag-image', parameters: [string(name: "SOURCE_URL", value: "${registry_image}"), string(name: "QUAY_IMAGE_TAG", value: "ocs-registry:latest-stable")]
+ def ocs_version = registry_tag.split('-')[0]
+ // tag ocs-registry container as 'latest-stable-$ocs_version'
+ build job: 'quay-tag-image', parameters: [string(name: "SOURCE_URL", value: "${registry_image}"), string(name: "QUAY_IMAGE_TAG", value: "ocs-registry:latest-stable-${ocs_version}")]
// tag ocs-olm-operator container as 'latest-stable'
- build job: 'quay-tag-image', parameters: [string(name: "SOURCE_URL", value: "${registry_image}"), string(name: "QUAY_IMAGE_TAG", value: "ocs-olm-operator:latest-stable")]
+ build job: 'quay-tag-image', parameters: [string(name: "SOURCE_URL", value: "${registry_image}"), string(name: "QUAY_IMAGE_TAG", value: "ocs-olm-operator:latest-stable-${ocs_version}")]
if( env.UMB_MESSAGE in [true, 'true'] ) {
def registry_version = registry_tag.split('-')[0]
def properties = """
|
Formatting
[skip-ci] | @@ -28,7 +28,7 @@ spec:
spec:
containers:
- name: memtire
- image: 100.64.176.12:80/wca/memtier_benchmark:d3ca4bb06f80a8eb83de6116ce6b7c3abe7ca153
+ image: 100.64.176.12:80/wca/memtier_benchmark:5fcfe9599d4ddf7bb6ecde3361bba547a23a3626
command:
- bash
- -c
|
swarming_bot: silence IOError during time.sleep()
This happens when a time.sleep() call returns with '[Errno 4] Interrupted
function call' which gets converted to an IOError exception, as the host is
shutting down. | @@ -1319,6 +1319,7 @@ def host_reboot(message=None, timeout=None):
"""
# The shutdown process sends SIGTERM and waits for processes to exit. It's
# important to not handle SIGTERM and die when needed.
+ # TODO(maruel): We may want to die properly here.
signal.signal(signal.SIGTERM, signal.SIG_DFL)
deadline = time.time() + timeout if timeout else None
@@ -1326,10 +1327,20 @@ def host_reboot(message=None, timeout=None):
host_reboot_and_return(message)
# Sleep for 300 seconds to ensure we don't try to do anymore work while the
# OS is preparing to shutdown.
+ loop = True
+ while loop:
duration = min(300, deadline - time.time()) if timeout else 300
- if duration > 0:
+ if duration <= 0:
+ break
logging.info('Sleeping for %s', duration)
+ try:
time.sleep(duration)
+ loop = False
+ except IOError as e:
+ # Ignore "[Errno 4] Interrupted function call"; we do not want the
+ # process to die, so let's sleep again until the OS forcibly kill the
+ # process.
+ logging.info('Interrupted sleep: %s', e)
if timeout and time.time() >= deadline:
logging.warning(
'Waited for host to reboot for too long (%s); aborting', timeout)
|
BUG: updated numpy version
Updated the numpy version to be specific. | @@ -18,7 +18,7 @@ jobs:
numpy_ver: "1.20"
os: ubuntu-latest
- python-version: "3.6.8"
- numpy_ver: "oldest-supported-numpy"
+ numpy_ver: "1.19.5"
os: "ubuntu-20.04"
name: Python ${{ matrix.python-version }} on ${{ matrix.os }} with numpy ${{ matrix.numpy_ver }}
|
Using html.unescape() instead of HTMLParser.unescape()
The unescape method is deprecated and will be removed in 3.5 | @@ -2,7 +2,7 @@ import re
from datetime import datetime, timedelta
from email.utils import formataddr
-from html.parser import HTMLParser
+from html import unescape
from django.conf import settings
from django.forms import ValidationError
@@ -242,8 +242,7 @@ def notify_about_activity_log(addon, version, note, perm_setting=None,
with translation.override(settings.LANGUAGE_CODE):
comments = '%s' % amo.LOG_BY_ID[note.action].short
else:
- htmlparser = HTMLParser()
- comments = htmlparser.unescape(comments)
+ comments = unescape(comments)
# Collect add-on authors (excl. the person who sent the email.) and build
# the context for them.
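A quick standalone illustration of the replacement API (the sample string is invented): html.unescape() is a plain module-level function, so no HTMLParser instance is required.
```python
from html import unescape

# Same behaviour as the removed HTMLParser().unescape(comments) call.
comments = "&lt;b&gt;Reviewer&lt;/b&gt; says: looks good &amp; approved"
assert unescape(comments) == "<b>Reviewer</b> says: looks good & approved"
```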
|
Fix example for new_zeros in documentation
Fix for Issue | @@ -139,9 +139,9 @@ Args:
Example::
>>> tensor = torch.tensor((), dtype=torch.float64)
- >>> tensor.new_ones((2, 3))
- tensor([[ 1., 1., 1.],
- [ 1., 1., 1.]], dtype=torch.float64)
+ >>> tensor.new_zeros((2, 3))
+ tensor([[ 0., 0., 0.],
+ [ 0., 0., 0.]], dtype=torch.float64)
""".format(**new_common_args))
|
Modified code snippets for `exclude_package_data` example
Made them consistent with the snippets given on the Package Discovery
page. The changes made here are similar to the changes made to the
previous example. | @@ -285,10 +285,9 @@ included in the installation, then you could use the ``exclude_package_data`` op
[options]
# ...
- packages =
- mypkg
+ packages = find:
package_dir =
- mypkg = src
+ = src
include_package_data = True
[options.exclude_package_data]
@@ -299,11 +298,11 @@ included in the installation, then you could use the ``exclude_package_data`` op
.. code-block:: python
- from setuptools import setup
+ from setuptools import setup, find_packages
setup(
# ...,
- packages=["mypkg"],
- package_dir={"mypkg": "src"},
+ packages=find_packages(where="src"),
+ package_dir={"": "src"},
include_package_data=True,
exclude_package_data={"mypkg": ["README.txt"]},
)
@@ -312,10 +311,8 @@ included in the installation, then you could use the ``exclude_package_data`` op
.. code-block:: toml
- [tool.setuptools]
- # ...
- packages = ["mypkg"]
- package-dir = { mypkg = "src" }
+ [tool.setuptools.packages.find]
+ where = ["src"]
[tool.setuptools.exclude-package-data]
mypkg = ["README.txt"]
|
add test to plugins/ambari/client.py
Add missing test for plugins/ambari/client.py.
*test_import_credential | @@ -95,6 +95,20 @@ class AmbariClientTestCase(base.SaharaTestCase):
"http://spam", verify=False, auth=client._auth,
headers=self.headers)
+ def test_import_credential(self):
+ resp = mock.Mock()
+ resp.text = ""
+ resp.status_code = 200
+ self.http_client.post.return_value = resp
+ client = ambari_client.AmbariClient(self.instance)
+
+ client.import_credential("test", alias="credential",
+ data={"some": "data"})
+ self.http_client.post.assert_called_once_with(
+ "http://1.2.3.4:8080/api/v1/clusters/test/credentials/credential",
+ verify=False, data=jsonutils.dumps({"some": "data"}),
+ auth=client._auth, headers=self.headers)
+
def test_get_registered_hosts(self):
client = ambari_client.AmbariClient(self.instance)
resp_data = """{
|
Fix issue with checklist.
The checklist values are not 'True' and 'False'; instead, the contents of the values list determine which options are checked off. | @@ -135,7 +135,7 @@ def layout():
dcc.Checklist(
id='speck-enable-presets',
options=[{'label': 'Use presets', 'value': 'True'}],
- values=['False']
+ values=[]
),
html.Br(),
|
Fix watchmedo tests in Windows
Unexpectedly, when test_load_config_invalid runs, PyYAML's get_single_data() raises different exceptions on Linux and Windows. This PR adds ScannerError so the tests pass under Windows. | @@ -3,7 +3,8 @@ from __future__ import unicode_literals
from watchdog import watchmedo
import pytest
-import yaml
+from yaml.constructor import ConstructorError
+from yaml.scanner import ScannerError
import os
@@ -36,7 +37,8 @@ def test_load_config_invalid(tmpdir):
).format(critical_dir)
f.write(content)
- with pytest.raises(yaml.constructor.ConstructorError):
+ # PyYAML get_single_data() raises different exceptions for Linux and Windows
+ with pytest.raises((ConstructorError, ScannerError)):
watchmedo.load_config(yaml_file)
assert not os.path.exists(critical_dir)
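An independent sketch of the pattern used in the diff (the loader below is a stand-in, not watchdog code): pytest.raises() accepts a tuple of exception types, so the test passes when either platform-specific error is raised.
```python
import pytest

def load_config(text):
    # Stand-in for the YAML loader: fails differently depending on the input.
    if ":" in text:
        raise ValueError("mapping values are not allowed here")
    raise TypeError("unexpected document type")

def test_load_config_invalid():
    with pytest.raises((ValueError, TypeError)):  # either exception passes
        load_config("tricks: - not: valid")
```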
|
Terminal mostly done, servers enabled.
grblserver doesn't work since it's coded up for an older methodology. | @@ -944,10 +944,8 @@ class Console(Module, Pipe):
yield 'bind [<key> <command>]'
yield 'alias [<alias> <command>]'
yield '-------------------'
- yield 'consoleserver'
yield 'ruidaserver'
yield 'grblserver'
- yield 'lhyserver'
yield '-------------------'
yield 'refresh'
return
@@ -1278,7 +1276,7 @@ class Console(Module, Pipe):
self.add_element(element)
return
elif command == 'polyline':
- element = Polygon(*args)
+ element = Polygon(list(map(float, args)))
element = Path(element)
self.add_element(element)
return
@@ -1446,37 +1444,24 @@ class Console(Module, Pipe):
else:
kernel.alias[args[0]] = ' '.join(args[1:])
return
- elif command == "consoleserver":
- # TODO Not done
- return
elif command == "grblserver":
- # TODO Not done
- if 'GrblEmulator' in self.device.instances['module']:
- self.pipe = active_device.instances['module']['GrblEmulator']
- else:
- self.pipe = active_device.open('module', 'GrblEmulator')
+ port = 23
+ tcp = True
+ try:
+ server = kernel.open('module', 'LaserServer', port=port, tcp=tcp)
yield "GRBL Mode."
chan = 'grbl'
active_device.add_watcher(chan, self.channel)
yield "Watching Channel: %s" % chan
- return
- elif command == "lhyserver":
- # TODO Not done
- self.pipe = self.device.instances['modules']['LhystudioController']
- yield "Lhymicro-gl Mode."
- chan = 'lhy'
- active_device.add_watcher(chan, self.channel)
- yield "Watching Channel: %s" % chan
+ server.set_pipe(active_device.using('module', 'GrblEmulator'))
+ except OSError:
+ yield 'Server failed on port: %d' % port
return
elif command == "ruidaserver":
port = 50200
tcp = False
try:
server = kernel.open('module', 'LaserServer', port=port, tcp=tcp)
- if 'RuidaEmulator' in self.device.instances['module']:
- pipe = active_device.instances['module']['RuidaEmulator']
- else:
- pipe = active_device.open('module', 'RuidaEmulator')
yield 'Ruida Server opened on port %d.' % port
chan = 'ruida'
active_device.add_watcher(chan, self.channel)
@@ -1484,7 +1469,7 @@ class Console(Module, Pipe):
chan = 'server'
active_device.add_watcher(chan, self.channel)
yield "Watching Channel: %s" % chan
- server.set_pipe(pipe)
+ server.set_pipe(active_device.using('RuidaEmulator'))
except OSError:
yield 'Server failed on port: %d' % port
return
|
Update version.py
Bumped to v0.0.6 | @@ -4,7 +4,7 @@ Provides information on the current InvenTree version
import subprocess
-INVENTREE_SW_VERSION = "0.0.5"
+INVENTREE_SW_VERSION = "0.0.6"
def inventreeVersion():
@@ -15,7 +15,6 @@ def inventreeVersion():
def inventreeCommitHash():
""" Returns the git commit hash for the running codebase """
- # TODO - This doesn't seem to work when running under gunicorn. Why is this?!
commit = str(subprocess.check_output('git rev-parse --short HEAD'.split()), 'utf-8').strip()
return commit
|
Apply suggestions from code review
Reverting to `assertIs`. | {% elif property == "ability" -%}
def test_{{ description }}(self):
score = Character().{{ property }}()
- self.assertTrue({{ supercase["expected"] | replace("&&","and") }})
+ self.assertIs({{ supercase["expected"] | replace("&&","and") }}, True)
{% elif property == "character" -%}
def test_{{ description}}(self):
{% if ability == "hitpoints" -%}
{% set statement = statement | replace("constitution","Char.constitution") -%}
{%- endif -%}
- self.assertTrue({{ statement }})
+ self.assertIs({{ statement }}, True)
{% endfor %}
{% elif property == "strength" -%}
def test_{{ description }}(self):
Char = Character()
- self.assertTrue({{ supercase["expected"] | replace(property , ["Char.", property]|join(""))}})
+ self.assertIs({{ supercase["expected"] | replace(property , ["Char.", property]|join(""))}}, True)
{%- endif -%}
{%- endmacro %}
|
try to debug error in tests (revert me)
Error Message
object of type 'NoneType' has no len()
Stacktrace
Traceback (most recent call last):
File "/tmp/kitchen/testing/tests/unit/utils/test_network.py", line 225, in test_parse_host_port
host, port = network.parse_host_port(host_port)
File "/tmp/kitchen/testing/salt/utils/network.py", line 1957, in parse_host_port
raise _e_
TypeError: object of type 'NoneType' has no len() | @@ -1968,8 +1968,9 @@ def parse_host_port(host_port):
host = host_ip
except ValueError:
log.debug('"%s" Not an IP address? Assuming it is a hostname.', host)
- except TypeError as _e_:
- log.error('"%s" generated a TypeError exception', host)
- raise _e_
+# Todo: uncomment and handle
+# except TypeError as _e_:
+# log.error('"%s" generated a TypeError exception', host)
+# raise _e_
return host, port
|
Moved create_inbound after dao_create_service
Need to do this, otherwise no service.id is available to link the service to the inbound number | @@ -71,11 +71,11 @@ def create_service(
sms_sender=sms_sender,
)
+ dao_create_service(service, service.created_by, service_id, service_permissions=service_permissions)
+
if do_create_inbound_number and INBOUND_SMS_TYPE in service_permissions:
create_inbound_number(number=sms_sender, service_id=service.id)
- dao_create_service(service, service.created_by, service_id, service_permissions=service_permissions)
-
service.active = active
service.research_mode = research_mode
|
Set to the one we use everywhere else
There are multiple xmlns attributes in xforms, but the one we want isn't
the one on the <meta /> node, but the non-namespaced one on the data
node. | @@ -1711,7 +1711,7 @@ def _get_form_metadata_context(domain, form, timezone, support_enabled=False):
from corehq.apps.hqwebapp.templatetags.proptable_tags import get_default_definition, get_tables_as_columns
meta = form.form_data.get('meta', None) or {}
-
+ meta['@xmlns'] = form.xmlns
meta['received_on'] = json_format_datetime(form.received_on)
meta['server_modified_on'] = json_format_datetime(form.server_modified_on) if form.server_modified_on else ''
if support_enabled:
|
Gracefully handle unrecognised BNF codes
This should be a rare and temporary situation. See | @@ -148,7 +148,10 @@ def measure_numerators_by_org(request, format=None):
# Fetch names after truncating results so we have fewer to look up
names = Presentation.names_for_bnf_codes([i["bnf_code"] for i in results])
for item in results:
- item["presentation_name"] = names[item["bnf_code"]]
+ # Occasional issues with BNF code updates mean we temporarily can't
+ # recognise a BNF code until we get the latest copy of the code mapping
+ # file.
+ item["presentation_name"] = names.get(item["bnf_code"], "<Name unavailable>")
response = Response(results)
filename = "%s-%s-breakdown.csv" % (measure, org_id)
if request.accepted_renderer.format == "csv":
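A tiny sketch of the pattern used in the fix (the codes and names here are invented, not real BNF data): dict.get() with a default avoids a KeyError for codes that are not yet in the mapping.
```python
# Hypothetical mapping; real data comes from Presentation.names_for_bnf_codes.
names = {"0501013B0AAABAB": "Amoxicillin 500mg capsules"}

bnf_code = "9999999XXAAAAA"  # made-up, unrecognised code
presentation_name = names.get(bnf_code, "<Name unavailable>")
print(presentation_name)  # -> <Name unavailable>
```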
|
refactor: get_group_by_count
Refactored raw query and partial get_all building with db.query + qb
notation equivalent. Added type hints & f-strings.
Intent: This particular API (for field "assigned_to") was taking
a while to run, so we decided to refactor it in hopes of perf improvements.
Result: 50% reduction in response times :D | -# Copyright (c) 2019, Frappe Technologies Pvt. Ltd. and Contributors
+# Copyright (c) 2022, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
+from typing import Dict, List
+
import frappe
+from frappe.query_builder.functions import Count
+from frappe.query_builder.terms import subqry
+from frappe.query_builder.utils import DocType
@frappe.whitelist()
@@ -24,37 +29,36 @@ def set_list_settings(doctype, values):
@frappe.whitelist()
-def get_group_by_count(doctype, current_filters, field):
+def get_group_by_count(doctype: str, current_filters: str, field: str) -> List[Dict]:
current_filters = frappe.parse_json(current_filters)
- subquery_condition = ""
- subquery = frappe.get_all(doctype, filters=current_filters, run=False)
if field == "assigned_to":
- subquery_condition = " and `tabToDo`.reference_name in ({subquery})".format(subquery=subquery)
- return frappe.db.sql(
- """select `tabToDo`.allocated_to as name, count(*) as count
- from
- `tabToDo`, `tabUser`
- where
- `tabToDo`.status!='Cancelled' and
- `tabToDo`.allocated_to = `tabUser`.name and
- `tabUser`.user_type = 'System User'
- {subquery_condition}
- group by
- `tabToDo`.allocated_to
- order by
- count desc
- limit 50""".format(
- subquery_condition=subquery_condition
- ),
- as_dict=True,
+ ToDo = DocType("ToDo")
+ User = DocType("User")
+ count = Count("*").as_("count")
+ filtered_records = frappe.db.query.build_conditions(doctype, current_filters).select("name")
+
+ return (
+ frappe.qb.from_(ToDo)
+ .from_(User)
+ .select(ToDo.allocated_to.as_("name"), count)
+ .where(
+ (ToDo.status != "Cancelled")
+ & (ToDo.allocated_to == User.name)
+ & (User.user_type == "System User")
+ & (ToDo.reference_name.isin(subqry(filtered_records)))
)
- else:
- return frappe.db.get_list(
+ .groupby(ToDo.allocated_to)
+ .orderby(count)
+ .limit(50)
+ .run(as_dict=True)
+ )
+
+ return frappe.get_list(
doctype,
filters=current_filters,
- group_by="`tab{0}`.{1}".format(doctype, field),
- fields=["count(*) as count", "`{}` as name".format(field)],
+ group_by=f"`tab{doctype}`.{field}",
+ fields=["count(*) as count", f"`{field}` as name"],
order_by="count desc",
limit=50,
)
|
Update the broken link for sample cri.d/conf.yaml
Zendesk ticket: | @@ -84,7 +84,7 @@ CRI does not include any events.
Need help? Contact [Datadog support][5].
-[1]: https://github.com/DataDog/datadog-agent/blob/master/cmd/agent/dist/conf.d/cri.d/conf.yaml.example
+[1]: https://github.com/DataDog/datadog-agent/blob/master/cmd/agent/dist/conf.d/cri.d/conf.yaml.default
[2]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent
[3]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent
[4]: https://github.com/DataDog/integrations-core/blob/master/cri/metadata.csv
|