repo (string, 7-60 chars) | instance_id (string, 11-64 chars) | base_commit (string, 40 chars) | patch (string, 237-114k chars) | test_patch (string, 1 distinct value) | problem_statement (string, 20-58k chars) | hints_text (string, 0-67.7k chars) | created_at (timestamp[ns], 2015-08-08 06:08:58 to 2024-12-12 22:07:22) | environment_setup_commit (string, 1 distinct value) | version (string, 1 distinct value) | FAIL_TO_PASS (sequence, length 0) | PASS_TO_PASS (sequence, length 0) |
---|---|---|---|---|---|---|---|---|---|---|---|
AllenNeuralDynamics/TigerASI | AllenNeuralDynamics__TigerASI-56 | 1751e3ee8b61f9e4b939e86b6304ba2b4a20ae50 | diff --git a/examples/tigerasi_is_busy.py b/examples/tigerasi_is_busy.py
new file mode 100644
index 0000000..f8dbff4
--- /dev/null
+++ b/examples/tigerasi_is_busy.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+"""Connects to the Tiger Box, moves some axes, returns to starting pose."""
+
+from tigerasi.tiger_controller import TigerController
+
+
+PORT_NAME = "COM4" # a string indicating the port name.
+# port name can be left as None on Linux if udev rules were installed.
+
+print("Connecting to Tiger Controller... ", end=" ", flush=True)
+box = TigerController(PORT_NAME)
+print("done.")
+
+# Start moving the x and y axes.
+box.move_relative(x=-2000, y=-2000)
+axis_moving_states = box.are_axes_moving()
+axis_moving_states_subset = box.are_axes_moving('x', 'y')
+x_is_moving = box.is_axis_moving('x')
+print(f"moving states (all axes): {axis_moving_states}")
+print(f"moving states (x and y): {axis_moving_states_subset}")
+print(f"x axis moving state: {x_is_moving}")
+box.move_relative(x=2000, y=2000)
\ No newline at end of file
diff --git a/src/tigerasi/tiger_controller.py b/src/tigerasi/tiger_controller.py
index ac2e32a..100d993 100644
--- a/src/tigerasi/tiger_controller.py
+++ b/src/tigerasi/tiger_controller.py
@@ -1045,35 +1045,57 @@ def get_ttl_output_state(self, wait: bool = True):
return bool(int(reply.lstrip(':A ')))
def is_moving(self):
- """blocks. True if any axes is moving. False otherwise."""
+ """True if any axes is moving. False otherwise. Blocks."""
+ return self.are_axes_moving()
+
+ def is_axis_moving(self, axis: str):
+ """True if the specified axis is moving. False otherwise. Blocks."""
+ return next(iter(self.are_axes_moving(axis).items()))[-1] # True or False
+
+ @axis_check()
+ def are_axes_moving(self, *axes: str):
+ """Return a dict of booleans, keyed by axis, indicating whether the
+ specified axes are (True) or are not (False) moving. Defaults to all
+ lettered axes if none are specified. Blocks. Implements
+ `RDSTAT <http://asiimaging.com/docs/products/serial_commands#commandrdstat_rs>`_
+ command."""
# Wait at least 20[ms] following the last time we sent a command.
# (Handles edge case where the last command was sent with wait=False.)
time_since_last_cmd = perf_counter() - self._last_cmd_send_time
sleep_time = REPLY_WAIT_TIME_S - time_since_last_cmd
if sleep_time > 0:
sleep(sleep_time)
- # Send the inquiry. Handle: ":A \r\n" and ":A\r\n"
- reply = self.send(f"{Cmds.STATUS.value}\r").rstrip().rstrip('\r\n')
+ if not axes: # Default to all lettered axes if none are specified.
+ axes = [x for x in self.ordered_axes if not x.isnumeric()]
+ axes_str = ''.join([f" {x.upper()}?" for x in axes])
+ # Send the inquiry. Handle: ":A \r\n" and ":A\r\n" and remove ":A " from reply
+ reply = self.send(f"{Cmds.RDSTAT.value + axes_str}\r").rstrip().rstrip('\r\n').lstrip(ACK).lstrip()
# interpret reply.
# Sometimes tigerbox replies with ACK to this cmd instead of B or N.
# Re-issue cmd if we received an ACK.
if reply == ACK:
self.log.warning("Received ':A' when we expected 'N' or 'B'. "
"Re-issuing command.")
- reply = self.send(f"{Cmds.STATUS.value}\r").rstrip('\r\n').strip()
- if reply == "B":
- return True
- elif reply == "N":
- return False
- else:
- raise RuntimeError(f"Error. Cannot tell if device is moving. "
+ reply = self.send(f"{Cmds.RDSTAT.value + axes_str}\r").rstrip().rstrip('\r\n').lstrip(ACK).lstrip()
+
+ axis_states = list(reply)
+ if 'B' not in reply and 'N' not in reply:
+ raise RuntimeError(f"Error. Cannot tell if axes are moving. "
f"Received: '{reply}'")
+ return {x.upper(): state == 'B' for x, state in zip(axes, axis_states)}
def wait(self):
"""Block until tigerbox is idle."""
while self.is_moving():
pass
+ # TODO: this needs to be tested
+ @axis_check()
+ def wait_on_axis(self, *axes: str):
+ """Block until specified axis is idle."""
+ while True in self.is_axis_moving(*axes).items():
+ pass
+
def clear_incoming_message_queue(self):
"""Clear input buffer and reset skipped replies."""
self.skipped_replies = 0
| genericize is_moving
I made some changes to collapse the two functions into one while preserving your function signature. (This needs to be tested. Is there an open machine I can test this on?)
| 2023-10-18T20:35:16 | 0.0 | [] | [] |
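For reference, a small usage sketch of the dict-returning `are_axes_moving` API this patch introduces, modelled on the example script in the patch; the port name and travel distances come from that example and are illustrative only:

```python
# Poll the per-axis moving states until the X and Y moves finish.
# Assumes the TigerController API exactly as added in the patch above.
from tigerasi.tiger_controller import TigerController

box = TigerController("COM4")
box.move_relative(x=-2000, y=-2000)
# are_axes_moving('x', 'y') returns e.g. {'X': True, 'Y': True} while moving
while any(box.are_axes_moving('x', 'y').values()):
    pass
box.move_relative(x=2000, y=2000)  # return to the starting pose
```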
|||
isi-nlp/boteval | isi-nlp__boteval-20 | f316499244f8b46a66967592cf656827be90e06e | diff --git a/boteval/controller.py b/boteval/controller.py
index fef86b9..95a91b6 100644
--- a/boteval/controller.py
+++ b/boteval/controller.py
@@ -102,29 +102,24 @@ def login():
ext_id = request.values.get('ext_id')
ext_src = request.values.get('ext_src')
- seamless_login = bool(service.config.is_seamless_crowd_login)
- if seamless_login and (not ext_id or not ext_src):
- log.warning(f'seamless login both {ext_id=} and {ext_src=}')
- seamless_login = False
-
# for seamless login, we still have to show terms and conditions,
tmpl_args = dict(
action=request.values.get('action', 'login'),
next=next_url,
ext_id=ext_id,
ext_src=ext_src,
- seamless_login=seamless_login,
onboarding=service.onboarding)
log.info(f"login/signup. next={next_url} | ext: src: {ext_src} id: {ext_id}")
if request.method == 'GET':
return render_template('login.html', **tmpl_args)
# form sumission as POST
- log.info(f'Form:: {request.form}')
args = dict(request.form)
user_id = args.pop('user_id')
secret = args.pop('secret')
+ log.info(f'Form:: {user_id} {args}')
action = args.pop('action', 'login')
+
assert action in ('login', 'signup')
if action == 'login':
user = User.get(user_id)
@@ -153,13 +148,6 @@ def login():
ext_id = args.pop('ext_id', None)
ext_src = args.pop('ext_src', None)
user = User.create_new(user_id, secret, name=name, ext_id=ext_id, ext_src=ext_src, data=args)
- if seamless_login:
- FL.login_user(user, remember=True, force=True)
- flask.flash('Logged in automatically')
- if next_url and is_safe_url(next_url):
- return flask.redirect(next_url)
- return flask.redirect(flask.url_for('app.index'))
-
tmpl_args['action'] = 'login'
flask.flash(f'Sign up success. Try login with your user ID: {user.id}. Verify that it works and write down the password for future logins.')
return render_template('login.html', **tmpl_args)
@@ -169,6 +157,46 @@ def login():
return render_template('login.html', user_id=user_id, **tmpl_args)
+ @router.route('/seamlesslogin', methods=['GET', 'POST'])
+ def seamlesslogin():
+ next_url = request.values.get('next')
+ ext_id = request.values.get('ext_id')
+ ext_src = request.values.get('ext_src')
+
+ if not ext_id or not ext_src:
+ log.warning(f'seamless login requires both {ext_id=} and {ext_src=}')
+ # return to normal login
+ return login()
+
+ # for seamless login, we still have to show terms and conditions,
+ tmpl_args = dict(
+ next=next_url,
+ ext_id=ext_id,
+ ext_src=ext_src,
+ onboarding=service.onboarding)
+ log.info(f"login/signup. next={next_url} | ext: src: {ext_src} id: {ext_id}")
+ if request.method == 'GET': # for GET, show terms,
+ return render_template('seamlesslogin.html', **tmpl_args)
+
+ # form sumission as POST => create a/c
+ args = dict(request.form)
+ user_id = args.pop('user_id')
+ secret = args.pop('secret')
+ user = User.get(user_id)
+ log.info(f'Form:: {user_id} {args}')
+ if user:# user already exists
+ log.warning(f'User {user_id} lready exists')
+ else:
+ name = args.pop('name', None)
+ user = User.create_new(user_id, secret, name=name, ext_id=ext_id, ext_src=ext_src, data=args)
+
+ FL.login_user(user, remember=True, force=True)
+ flask.flash('Logged in automatically')
+ if next_url and is_safe_url(next_url):
+ return flask.redirect(next_url)
+ return flask.redirect(flask.url_for('app.index'))
+
+
@router.route('/logout', methods=['GET'])
@FL.login_required
def logout():
@@ -181,8 +209,8 @@ def about():
return render_template('about.html')
@router.route('/instructions', methods=['GET'])
- def instructions():
- return render_template('page.html', content=service.instructions)
+ def instructions(focus_mode=False):
+ return render_template('page.html', content=service.instructions, focus_mode=focus_mode)
@router.route('/', methods=['GET'])
@@ -348,21 +376,18 @@ def mturk_landing(topic_id): # this is where mturk worker should land first
return msg, 400
if is_previewing:
- return instructions() # sending index page for now. We can do better later
+ return instructions(focus_mode=True) # sending index page for now. We can do better later
# Step2. Find the mapping user
user = User.query.filter_by(ext_src=ext_src, ext_id=worker_id).first()
if not user: # sign up and come back (so set next)
+
return flask.redirect(
- url_for('app.login', action='signup', ext_id=worker_id,
- ext_src=ext_src, next=request.url))
- elif not FL.current_user or FL.current_user.get_id() != user.id:
+ url_for('app.seamlesslogin', ext_id=worker_id, ext_src=ext_src, next=request.url))
+
+ if not FL.current_user or FL.current_user.get_id() != user.id:
FL.logout_user()
- if seamless_login: # auto login
- FL.login_user(user, remember=True, force=True)
- else: # login and return back here
- flask.flash(f'You have an a/c with UserID={user.id} but not logged in. Please relogin as {user.id}.')
- return flask.redirect(url_for('app.login', action='login', next=request.url))
+ FL.login_user(user, remember=True, force=True)
limit_exceeded, msg = service.limit_check(topic=topic, user=user)
if limit_exceeded: # we may need to block user i.e. change user qualification
diff --git a/boteval/templates/login.html b/boteval/templates/login.html
index 8577831..5ed931e 100644
--- a/boteval/templates/login.html
+++ b/boteval/templates/login.html
@@ -32,17 +32,10 @@ <h1 class="h3 mb-3 mt-4 font-weight-normal mt-2">Please Sign In</h1>
<div class="tab-pane fade col-8" id="signup" role="tabpanel" aria-labelledby="profile-tab">
<form action="{{url_for('app.login')}}" method="POST" class="form">
<input type="hidden" name="action" value="signup">
- {% if seamless_login %}
<h1 class="h3 mb-3 mt-4 font-weight-normal">Please Sign Up</h1>
- <h1 class="h3 mb-3 mt-4 font-weight-normal">Please Review Agreement</h1>
- <input type="hidden" name="user_id" id="s_user_id" class="form-control mt-2" value="{{ext_id}}" >
- <input type="hidden" name="secret" id="s_password" class="form-control mt-2" value="{{ext_id}}">
- {%else%}
- <h1 class="h3 mb-3 mt-4 font-weight-normal">Please Sign Up</h1>
- <input type="text" name="user_id" id="s_user_id" class="form-control mt-2" placeholder="ID" required value="{{user_id}}">
- <input type="password" name="secret" id="s_password" class="form-control mt-2" placeholder="Password"
- required>
- {%endif%}
+ <input type="text" name="user_id" id="s_user_id" class="form-control mt-2" placeholder="ID" required value="{{user_id}}">
+ <input type="password" name="secret" id="s_password" class="form-control mt-2" placeholder="Password"
+ required>
{% if ext_id %} <input type="hidden" name="ext_id" value="{{ext_id}}"> {%endif%}
{% if ext_src %} <input type="hidden" name="ext_src" value="{{ext_src}}"> {%endif%}
<!--
diff --git a/boteval/templates/seamlesslogin.html b/boteval/templates/seamlesslogin.html
new file mode 100644
index 0000000..0bdf10f
--- /dev/null
+++ b/boteval/templates/seamlesslogin.html
@@ -0,0 +1,67 @@
+{% set focus_mode = True %}
+{% extends 'base.html' %}
+
+{% block content %}
+<div class="container">
+ <div class="row" id="myTabContent">
+ <div class="col-8" id="signup">
+ <form action="{{url_for('app.seamlesslogin')}}" method="POST" class="form">
+ <input type="hidden" name="action" value="signup">
+ <h1 class="h3 mb-3 mt-4 font-weight-normal">Please Review Agreement</h1>
+ <input type="hidden" name="user_id" id="s_user_id" class="form-control mt-2" value="{{ext_id}}" >
+ <input type="hidden" name="secret" id="s_password" class="form-control mt-2" value="{{ext_id}}">
+ <input type="hidden" name="ext_id" value="{{ext_id}}">
+ <input type="hidden" name="ext_src" value="{{ext_src}}">
+ {% if onboarding %}
+ {%if onboarding.get('agreement_text') %}
+ <button type="button" class="btn btn-info my-3" data-toggle="modal" data-target="#staticBackdrop">
+ Open terms and conditions
+ </button>
+ <small class="text-muted">Please refresh if the agree and sign up button doesn't get you to the task.</small>
+ {%endif%}
+ {%if onboarding.get('checkboxes') %}
+ {%for check_name, check_value in onboarding['checkboxes'].items() %}
+ <div class="form-check my-2">
+ <input class="form-check-input" type="checkbox" value="true" id="{{check_name}}" name="checkbox_{{check_name}}" required
+ autofocus>
+ <label class="form-check-label" for="{{check_name}}">
+ {{check_value|safe}}
+ </label>
+ </div>
+ {%endfor%}
+ {%endif%}
+ {%endif%}
+ <!-- Name not required; favor anonymity
+ <input type="text" name="name" id="name" class="form-control mt-2" placeholder="Name" required>
+ -->
+ {% if next %} <input type="hidden" name="next" value="{{next}}"> {%endif%}
+ <button class="btn btn-lg btn-primary btn-block col-4 mt-2" type="submit">Agree and Proceed</button>
+ </form>
+ </div>
+ </div>
+</div>
+
+{% if onboarding and onboarding.get('agreement_text') %}
+<!-- Modal has to be at the top level -->
+<div class="modal fade" id="staticBackdrop" data-backdrop="static" data-keyboard="false" tabindex="-1"
+ aria-labelledby="staticBackdropLabel" aria-hidden="true">
+ <div class="modal-dialog modal-xl">
+ <div class="modal-content">
+ <div class="modal-header">
+ <h5 class="modal-title" id="staticBackdropLabel"></h5>
+ <button type="button" class="close" data-dismiss="modal" aria-label="Close">
+ <span aria-hidden="true">×</span>
+ </button>
+ </div>
+ <div class="modal-body">
+ {{onboarding['agreement_text']|safe}}
+ </div>
+ <div class="modal-footer">
+ <button type="button" class="btn btn-primary" data-dismiss="modal">Agree</button>
+ </div>
+ </div>
+ </div>
+</div>
+{%endif%}
+
+{% endblock %}
\ No newline at end of file
diff --git a/docs/01-getting-started.adoc b/docs/01-getting-started.adoc
index 78c8ee3..835c745 100644
--- a/docs/01-getting-started.adoc
+++ b/docs/01-getting-started.adoc
@@ -334,9 +334,8 @@ mturk:
<5> cross references using `&` and `*` for reusing previously defined limits
- MTurk integration is achieved via link:https://docs.aws.amazon.com/AWSMechTurk/latest/AWSMturkAPI/ApiReference_ExternalQuestionArticle.html[ExternalQuestion^]
-
-* However, _ExternalQuestion_ requires hosting our webservice over HTTPS, which require SSL certificate. See <<#nginx>>.
+MTurk integration is achieved via link:https://docs.aws.amazon.com/AWSMechTurk/latest/AWSMturkAPI/ApiReference_ExternalQuestionArticle.html[ExternalQuestion^]
+However, _ExternalQuestion_ requires hosting our webservice over HTTPS, which require SSL certificate. See <<#nginx>>.
TIP: When the task is done, we need to submit a form back to MTurk informing the completion. So, Mturk worker gets an additional screen at the end of task where they click a button to notify the task completion to mturk.
diff --git a/docs/v0.1/index.html b/docs/v0.1/index.html
index b5d6fa7..b4b34bf 100644
--- a/docs/v0.1/index.html
+++ b/docs/v0.1/index.html
@@ -1275,22 +1275,14 @@ <h3 id="conf-mturk">4.6. Crowd: MTurk Settings</h3>
</tr>
<tr>
<td><i class="conum" data-value="5"></i><b>5</b></td>
-<td>cross references using <code>&</code> and <code>*</code> for reusing previously defined limits
-<div class="literalblock">
-<div class="content">
-<pre>MTurk integration is achieved via link:https://docs.aws.amazon.com/AWSMechTurk/latest/AWSMturkAPI/ApiReference_ExternalQuestionArticle.html[ExternalQuestion^]</pre>
-</div>
-</div>
-<div class="ulist">
-<ul>
-<li>
-<p>However, <em>ExternalQuestion</em> requires hosting our webservice over HTTPS, which require SSL certificate. See <a href="#nginx">Section 6, “HTTPS with Nginx Reverse Proxy”</a>.</p>
-</li>
-</ul>
-</div></td>
+<td>cross references using <code>&</code> and <code>*</code> for reusing previously defined limits</td>
</tr>
</table>
</div>
+<div class="paragraph">
+<p>MTurk integration is achieved via <a href="https://docs.aws.amazon.com/AWSMechTurk/latest/AWSMturkAPI/ApiReference_ExternalQuestionArticle.html" target="_blank" rel="noopener">ExternalQuestion</a>
+However, <em>ExternalQuestion</em> requires hosting our webservice over HTTPS, which require SSL certificate. See <a href="#nginx">Section 6, “HTTPS with Nginx Reverse Proxy”</a>.</p>
+</div>
<div class="admonitionblock tip">
<table>
<tr>
| sign-up button doesn't directly lead to task
| [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing) is probably the culprit.
We load the iframe over https, but one of the redirects was over http, and the Chrome browser blocked it.
I think we should simplify our `/login`
https://github.com/isi-nlp/boteval/blob/f316499244f8b46a66967592cf656827be90e06e/boteval/controller.py#L100-L169
Let's create a simplified login and ensure redirects stay over https if the request URL scheme is https.
Related https://stackoverflow.com/a/58648040/1506477 | 2022-10-12T20:05:15 | 0.0 | [] | [] |
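One common mitigation for the https-iframe/http-redirect mix described above is to make Flask aware of the TLS-terminating proxy so that `url_for` and redirects keep the https scheme. A minimal sketch, assuming the app sits behind a reverse proxy that sets `X-Forwarded-Proto`; this is not necessarily how boteval wires its app:

```python
from flask import Flask
from werkzeug.middleware.proxy_fix import ProxyFix

app = Flask(__name__)
# Trust X-Forwarded-Proto/-Host from the reverse proxy so generated URLs
# and redirects use https instead of the internal http scheme.
app.wsgi_app = ProxyFix(app.wsgi_app, x_proto=1, x_host=1)
```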
||
tensorchord/modelz-llm | tensorchord__modelz-llm-85 | 484adbb59587b0fd09c4c7145c7ff6c4190d1d62 | diff --git a/src/modelz_llm/model.py b/src/modelz_llm/model.py
index 21a4afb..f53aaf1 100644
--- a/src/modelz_llm/model.py
+++ b/src/modelz_llm/model.py
@@ -6,9 +6,9 @@
from llmspec import (
ChatCompletionRequest,
ChatResponse,
+ CompletionResponse,
LanguageModels,
PromptCompletionRequest,
- Role,
)
from modelz_llm.log import logger
@@ -91,26 +91,36 @@ def get_prompt_from_req(
return req.get_prompt()
def __call__(
- self, req: Union[ChatCompletionRequest, PromptCompletionRequest]
- ) -> ChatResponse:
- """Generate ChatCompletionResponse for ChatCompletionRequest."""
+ self, req: Union[ChatResponse, CompletionResponse]
+ ) -> Union[ChatResponse, ChatCompletionRequest]:
+ """Generate chat or completion response."""
+ resp_cls = (
+ ChatResponse
+ if isinstance(req, ChatCompletionRequest)
+ else CompletionResponse
+ )
if self.model_spec is not LanguageModels.CHAT_GLM.value:
- return list(self.step_generate(req))[0]
+ return list(self.step_generate(req, resp_cls=resp_cls))[0]
tokens = self.token_encode(self.get_prompt_from_req(req))
input_length = len(tokens[0])
outputs = self.generate(tokens, **req.get_inference_args(self.model_name))[0]
message = self.token_decode(outputs[input_length:])
- return ChatResponse.from_message(
+ return resp_cls.from_message(
message=message,
- role=Role.ASSISTANT,
model=self.model_name,
finish_reason=None,
prompt_token=input_length,
completion_token=len(outputs) - input_length,
)
- def step_generate(self, req: ChatCompletionRequest, echo=False, stream_interval=1):
+ def step_generate(
+ self,
+ req: ChatCompletionRequest,
+ resp_cls: Union[ChatResponse, CompletionResponse],
+ echo=False,
+ stream_interval=1,
+ ):
"""Ref to FastChat.
https://github.com/lm-sys/FastChat/blob/8e38141ff5dd15f3138ccfd312dd73a471e986a1/fastchat/serve/inference.py#L58
@@ -256,13 +266,12 @@ def step_generate(self, req: ChatCompletionRequest, echo=False, stream_interval=
else:
finish_reason = None
- yield ChatResponse.from_message(
- output,
- Role.ASSISTANT,
- self.model_name,
- finish_reason,
- input_length,
- i,
+ yield resp_cls.from_message(
+ message=output,
+ model=self.model_name,
+ finish_reason=finish_reason,
+ prompt_token=input_length,
+ completion_token=i,
)
# clean
| bug: Completion request returns wrong response
https://platform.openai.com/docs/api-reference/completions/create
It should return:
```
{
"id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7",
"object": "text_completion",
"created": 1589478378,
"model": "text-davinci-003",
"choices": [
{
"text": "\n\nThis is indeed a test",
"index": 0,
"logprobs": null,
"finish_reason": "length"
}
],
"usage": {
"prompt_tokens": 5,
"completion_tokens": 7,
"total_tokens": 12
}
}
```
But we get:
```
{
"id": "1306c25e-968f-4a02-adcb-5bcfbd19524a",
"object": "chat",
"created": "2023-06-20T19:58:12.437203",
"model": "bigscience/bloomz-560m",
"usage": {
"prompt_tokens": 7,
"completion_tokens": 5,
"total_tokens": 12
},
"choices": [
{
"message": {
"content": " The Dark Knight Rises",
"role": "assistant",
"name": ""
},
"index": 0,
"finish_reason": "stop"
}
]
}
```
| 2023-06-21T05:17:51 | 0.0 | [] | [] |
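To illustrate the expected behaviour after the fix, a hedged client-side check; the endpoint URL and prompt are assumptions, while the model name is the one from the report above:

```python
import requests

# Assumed local endpoint; adjust to wherever modelz-llm is served.
resp = requests.post(
    "http://localhost:8000/completions",
    json={"model": "bigscience/bloomz-560m", "prompt": "Once upon a time"},
)
body = resp.json()
assert body["object"] == "text_completion"  # not "chat"
assert "text" in body["choices"][0]          # not a chat-style "message"
```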
|||
agriyakhetarpal/hugo-python-distributions | agriyakhetarpal__hugo-python-distributions-12 | f22f696e705a69c4e5bb3cbadad4cab3a47991e3 | diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 1f31a10..4837d2e 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -51,7 +51,7 @@ jobs:
run: choco install mingw
- name: Restore Hugo builder cache
- uses: actions/[email protected]
+ uses: actions/cache/restore@v3
with:
path: ./hugo_cache/
key: ${{ runner.os }}-${{ matrix.go-version }}-hugo-build-cache-${{ hashFiles('**/setup.py', '**/pyproject.toml') }}
@@ -63,5 +63,11 @@ jobs:
run: |
python -m build --wheel . --outdir dist/
+ - name: Save Hugo builder cache
+ uses: actions/cache/save@v3
+ with:
+ path: ./hugo_cache/
+ key: ${{ runner.os }}-${{ matrix.go-version }}-hugo-build-cache-${{ hashFiles('**/setup.py', '**/pyproject.toml') }}
+
- name: Test entry points for package
run: nox -s venv
diff --git a/README.md b/README.md
index 7564fda..3aa713c 100644
--- a/README.md
+++ b/README.md
@@ -22,11 +22,14 @@ pip install hugo-python
This places a `hugo` executable in a `binaries` directory in your virtual environment and adds an entry point to it.
-> [!IMPORTANT]
-> It is currently necessary to use a virtual environment to install and use isolated version of Hugo. Please refer to https://github.com/agriyakhetarpal/hugo-python-distributions/issues/7
+Alternatively, you can install the package globally on your system:
+
+```bash
+pip3 install hugo-python
+```
> [!TIP]
-> You can, however, use [`pipx`](https://github.com/pypa/pipx) to install Hugo in an isolated environment without having to create a virtual environment manually, allowing you to use Hugo as a command-line tool without having to install it globally on your system. Please refer to the [`pipx` documentation](https://pipx.pypa.io/stable/) for more information.
+> It is a great idea to use [`pipx`](https://github.com/pypa/pipx) to install Hugo in an isolated location without having to create a virtual environment, which will allow you to use Hugo as a command-line tool without having to install it globally on your system. Please refer to the [`pipx` documentation](https://pipx.pypa.io/stable/) for more information.
Then, you can use the `hugo` commands as you would normally:
@@ -52,15 +55,13 @@ For more information on using Hugo and its command-line interface, please refer
## Supported platforms
-<!-- Add a table -->
| Platform | Architecture | Supported |
| -------- | ------------ | ---------------- |
| macOS | x86_64 | â
|
-| macOS | arm64 | Coming soon |
+| macOS | arm64 | â
|
| Linux | amd64 | â
|
| Linux | arm64 | Coming soon |
| Windows | x86_64 | â
|
-| Windows | arm64 | Coming soon |
## Building from source
@@ -87,6 +88,27 @@ pip install -e . # editable installation
pip install . # regular installation
```
+### Cross-compiling for different architectures
+
+> [!NOTE]
+> This functionality is implemented just for macOS at the moment, but it can be extended to other platforms as well in the near future.
+
+This package is capable of cross-compiling Hugo binaries for the same platform but different architectures and it can be used as follows.
+
+Say, on an Intel-based (x86_64) macOS machine:
+
+```bash
+export GOARCH="arm64"
+pip install . # or pip install -e .
+```
+
+This will build a macOS arm64 binary distribution of Hugo that can be used on Apple Silicon-based (arm64) macOS machines. To build a binary distribution for the _target_ Intel-based (x86_64) macOS platform on the _host_ Apple Silicon-based (arm64) macOS machine, you can use the following command:
+
+```bash
+export GOARCH="amd64"
+pip install . # or pip install -e .
+```
+
## Background
Binaries for the Hugo static site generator are available for download from the [Hugo releases page](https://github.com/gohugoio/hugo/releases). These binaries have to be downloaded and placed in an appropriate location on the system manually and the PATH environment variable has to be updated to include said location.
diff --git a/licenses/LICENSE-hugo.txt b/licenses/LICENSE-hugo.txt
index 3a8e444..ad3850b 100644
--- a/licenses/LICENSE-hugo.txt
+++ b/licenses/LICENSE-hugo.txt
@@ -198,4 +198,4 @@
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
- limitations under the License.
\ No newline at end of file
+ limitations under the License.
diff --git a/pyproject.toml b/pyproject.toml
index 8a2df53..2ba06b4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,5 +1,5 @@
[build-system]
-requires = ["setuptools>=61", "pooch", "tqdm", "wheel"]
+requires = ["setuptools>=64", "pooch", "tqdm", "wheel==0.42.0"]
build-backend = "setuptools.build_meta"
[project]
diff --git a/python_hugo/__init__.py b/python_hugo/__init__.py
index 16bcf71..abdf26f 100644
--- a/python_hugo/__init__.py
+++ b/python_hugo/__init__.py
@@ -6,65 +6,8 @@
from __future__ import annotations
-import os
-import sys
+from python_hugo.cli import __call
-import importlib.metadata
-
-HUGO_VERSION = importlib.metadata.version("python-hugo")
-FILE_EXT = ".exe" if sys.platform == "win32" else ""
-
-# On editable and source installs, we keep binaries in the package directory
-# for ease of use. On wheel installs, we keep them in the venv/binaries directory.
-# On installing from a wheel, the binary is in the venv/binaries, but the package
-# is in venv/lib/python3.X/site-packages, so we need to go up two directories and
-# then down into binaries
-# Note: on Windows, this is venv\Lib\site-packages (instead of venv/lib/python3.X/site-packages)
-# therefore we need to go up to the venv directory and then down into the data files
-try:
- hugo_executable = os.path.join(
- os.path.dirname(__file__),
- "binaries",
- f"hugo-{HUGO_VERSION}" + FILE_EXT,
- )
- if not os.path.exists(hugo_executable):
- raise FileNotFoundError
-except FileNotFoundError:
- if sys.platform == "win32":
- PATH_TO_SEARCH = os.path.join(
- os.path.dirname(
- os.path.dirname(
- os.path.dirname(
- os.path.dirname(__file__)
- )
- )
- ),
- "binaries"
- ) # four times instead of five
- else:
- # five times instead of four
- PATH_TO_SEARCH = os.path.join(
- os.path.dirname(
- os.path.dirname(
- os.path.dirname(
- os.path.dirname(
- os.path.dirname(__file__)
- )
- )
- )
- ),
- "binaries"
- )
-
- # Go up into the venv directory and down into the data files
- hugo_executable = os.path.join(PATH_TO_SEARCH, f"hugo-{HUGO_VERSION}" + FILE_EXT)
- if not os.path.exists(hugo_executable):
- raise FileNotFoundError from None
-except Exception as e:
- sys.exit(f"Error: {e}")
-
-def __call():
- """
- Hugo binary entry point. Passes all command-line arguments to Hugo.
- """
- os.execvp(hugo_executable, ["hugo", *sys.argv[1:]])
+# Hugo binary entry point caller
+if __name__ == "__main__":
+ __call()
diff --git a/python_hugo/binaries/.gitignore b/python_hugo/binaries/.gitignore
new file mode 100644
index 0000000..f935021
--- /dev/null
+++ b/python_hugo/binaries/.gitignore
@@ -0,0 +1,1 @@
+!.gitignore
diff --git a/python_hugo/cli.py b/python_hugo/cli.py
new file mode 100644
index 0000000..b61ac9e
--- /dev/null
+++ b/python_hugo/cli.py
@@ -0,0 +1,46 @@
+"""
+Copyright (c) 2023 Agriya Khetarpal. All rights reserved.
+
+python-hugo: Binaries for the Hugo static site generator, installable with pip
+"""
+
+from __future__ import annotations
+
+import importlib.metadata
+import os
+import platform
+import sys
+from functools import lru_cache
+
+HUGO_VERSION = importlib.metadata.version("python-hugo")
+FILE_EXT = ".exe" if sys.platform == "win32" else ""
+HUGO_PLATFORM = {
+ "darwin": "darwin",
+ "linux": "linux",
+ "win32": "windows",
+}[sys.platform]
+HUGO_ARCH = {
+ "x86_64": "amd64",
+ "arm64": "arm64",
+ "AMD64": "amd64",
+ "aarch64": "arm64",
+}[platform.machine()]
+
+
+@lru_cache(maxsize=1)
+def hugo_executable():
+ """
+ Returns the path to the Hugo executable.
+ """
+ return os.path.join(
+ os.path.dirname(__file__),
+ "binaries",
+ f"hugo-{HUGO_VERSION}-{HUGO_PLATFORM}-{HUGO_ARCH}" + FILE_EXT,
+ )
+
+
+def __call():
+ """
+ Hugo binary entry point. Passes all command-line arguments to Hugo.
+ """
+ os.execvp(hugo_executable(), ["hugo", *sys.argv[1:]])
diff --git a/setup.py b/setup.py
index 0b069ad..55e9a8e 100644
--- a/setup.py
+++ b/setup.py
@@ -1,12 +1,17 @@
+import glob
import os
-import sys
+import platform
+import shutil
import subprocess
+import sys
import tarfile
-
-from setuptools import setup, Extension
-from setuptools.command.build_ext import build_ext
+from pathlib import Path
import pooch
+from setuptools import Command, Extension, setup
+from setuptools.command.build_ext import build_ext
+from wheel.bdist_wheel import bdist_wheel, get_platform
+from wheel.macosx_libfile import calculate_macosx_platform_tag
# Keep in sync with pyproject.toml and update SHA-256 hashes accordingly
HUGO_VERSION = "0.120.4"
@@ -14,25 +19,58 @@
f"https://github.com/gohugoio/hugo/archive/refs/tags/v{HUGO_VERSION}.tar.gz"
)
-# Pooch will download the tarball into the OS cache directory.
+# The pooch tool will download the tarball into the hugo_cache/ directory.
# We will point the build command to that location to build Hugo from source
HUGO_CACHE_DIR = "hugo_cache"
HUGO_SHA256 = "e374effe369c340d8085060e6bb45337eabf64cfe075295432ecafd6d033eb8b"
FILE_EXT = ".exe" if sys.platform == "win32" else ""
+# Normalise platform strings to match the Go toolchain
+HUGO_PLATFORM = {
+ "darwin": "darwin",
+ "linux": "linux",
+ "win32": "windows",
+}[sys.platform]
+
+# Normalise architecture strings to match the Go toolchain
+HUGO_ARCH = {
+ "x86_64": "amd64",
+ "arm64": "arm64",
+ "AMD64": "amd64",
+ "aarch64": "arm64",
+}[platform.machine()]
+
class HugoBuilder(build_ext):
"""
- Custom build_ext command that builds Hugo from source
+ Custom extension command that builds Hugo from source, placing the binary into
+ the package directory for further use.
"""
def initialize_options(self):
super().initialize_options()
self.hugo_version = None
+ self.hugo_platform = None
+ self.hugo_arch = None
def finalize_options(self):
+ # Platforms and architectures that we will build Hugo for are:
+ # i.e., a subset of "go tool dist list":
+ # 1. darwin/amd64
+ # 2. darwin/arm64
+ # 3. linux/amd64
+ # 4. linux/arm64
+ # 5. windows/amd64
+ # The platform is the first part of the string, the architecture is the second.
+ # We need to mangle the hugo binary name to include the platform and architecture
+ # so that we can build Hugo for multiple platforms and architectures.
+ # The platform is used to set the GOOS environment variable, the architecture
+ # is used to set the GOARCH environment variable, and they must be exactly these
+ # strings for the Go toolchain to work.
super().finalize_options()
self.hugo_version = HUGO_VERSION
+ self.hugo_platform = HUGO_PLATFORM
+ self.hugo_arch = HUGO_ARCH
def run(self):
"""
@@ -40,7 +78,7 @@ def run(self):
# the name so that it is unique to the verion of Hugo being built.
"""
- # Download Hugo source tarball, place into OS cache directory
+ # Download Hugo source tarball, place into hugo_cache/ directory
hugo_targz = pooch.retrieve(
url=HUGO_RELEASE,
known_hash=HUGO_SHA256,
@@ -48,18 +86,28 @@ def run(self):
progressbar=True,
)
- # Extract Hugo source tarball into a folder of the same name in the OS cache directory
+ # Extract Hugo source tarball into a folder hugo-HUGO_VERSION/
+ # inside hugo_cache/
with tarfile.open(hugo_targz) as tar:
tar.extractall(path=HUGO_CACHE_DIR)
- # The binary is put into GOBIN, which is set to the package directory (python_hugo/binaries)
- # for use in editable mode. The binary is copied into the wheel afterwards
+ # The binary is put into GOBIN, which is set to the package directory
+ # (python_hugo/binaries/) for use in editable mode. The binary is copied
+ # into the wheel afterwards
+ # Error: GOBIN cannot be set if GOPATH is set when compiling for different
+ # architectures, so we use the default GOPATH/bin as the place to copy
+ # binaries from
+ # os.environ["GOBIN"] = os.path.join(
+ # os.path.dirname(os.path.abspath(__file__)), "python_hugo", "binaries"
+ # )
os.environ["CGO_ENABLED"] = "1"
- os.environ["GOBIN"] = os.path.join(
- os.path.dirname(os.path.abspath(__file__)), "python_hugo", "binaries"
- )
+ os.environ["GOPATH"] = os.path.abspath(
+ HUGO_CACHE_DIR
+ ) # must be absolute (Go requirement)
- os.environ["GOPATH"] = os.path.abspath(HUGO_CACHE_DIR)
+ os.environ["GOOS"] = self.hugo_platform
+ os.environ["GOARCH"] = os.environ.get("GOARCH", self.hugo_arch)
+ # i.e., allow override if GOARCH is set!
# Build Hugo from source using the Go toolchain, place it into GOBIN
# Requires the following dependencies:
@@ -69,6 +117,12 @@ def run(self):
# 3. Git
#
# Once built this the files are cached into GOPATH for future use
+
+ # Delete hugo_cache/bin/ + files inside, it left over from a previous build
+ shutil.rmtree(
+ os.path.join(os.path.abspath(HUGO_CACHE_DIR), "bin"), ignore_errors=True
+ )
+
subprocess.check_call(
[
"go",
@@ -80,28 +134,169 @@ def run(self):
)
# TODO: introduce some error handling here to detect compilers, etc.
- # Mangle the name of the compiled executable to include the version
- # of Hugo being built
- original_name = os.path.join(os.environ.get("GOBIN"), "hugo" + FILE_EXT)
+ # Mangle the name of the compiled executable to include the version, the
+ # platform, and the architecture of Hugo being built.
+ # The binary is present in GOPATH (i.e, either at hugo_cache/bin/ or at
+ # hugo_cache/bin/$GOOS_$GOARCH/bin) and now GOBIN is not set, so we need
+ # to copy it from there.
+
+ # If the GOARCH is not the same as self.hugo_arch, we are cross-compiling, so
+ # we need to go into the GOOS_GOARCH/bin folder to find the binary rather than
+ # the GOPATH/bin folder.
+
+ if os.environ.get("GOARCH") != self.hugo_arch:
+ original_name = os.path.join(
+ os.environ.get("GOPATH"),
+ "bin",
+ f"{self.hugo_platform}_{os.environ.get('GOARCH')}",
+ "hugo" + FILE_EXT,
+ )
+ else:
+ original_name = os.path.join(
+ os.environ.get("GOPATH"), "bin", "hugo" + FILE_EXT
+ )
+
new_name = os.path.join(
- os.environ.get("GOBIN"), f"hugo-{HUGO_VERSION}" + FILE_EXT
+ os.environ.get("GOPATH"),
+ "bin",
+ f"hugo-{HUGO_VERSION}-{self.hugo_platform}-{os.environ.get('GOARCH', self.hugo_arch)}"
+ + FILE_EXT,
)
os.rename(original_name, new_name)
+ # Copy the new_name file into a folder binaries/ inside python_hugo/
+ # so that it is included in the wheel.
+ # basically we are copying hugo-HUGO_VERSION-PLATFORM-ARCH into
+ # python_hugo/binaries/ and creating the folder if it does not exist.
+
+ binaries_dir = os.path.join(
+ os.path.dirname(__file__), "python_hugo", "binaries"
+ )
+ if not os.path.exists(binaries_dir):
+ os.mkdir(binaries_dir)
+
+ # if the binary already exists, delete it, and then copy the new binary
+ # to ensure that the binary is always the newest rendition
+ if os.path.exists(os.path.join(binaries_dir, os.path.basename(new_name))):
+ os.remove(os.path.join(binaries_dir, os.path.basename(new_name)))
+ os.rename(new_name, os.path.join(binaries_dir, os.path.basename(new_name)))
+
+
+# https://github.com/pypa/setuptools/issues/1347: setuptools does not support
+# the clean command from distutils yet. so we need to use a workaround that gets
+# called inside bdist_wheel invocation.
+class Cleaner(Command):
+ """
+ Custom command that cleans the build directory of the package at the project root.
+ """
+
+ def initialize_options(self):
+ pass
+
+ def finalize_options(self):
+ pass
+
+ def run(self):
+ """Clean ancillary files at runtime."""
+
+ here = os.path.normpath(os.path.abspath(os.path.dirname(__file__)))
+ files_to_clean = "./build ./*.pyc ./*.egg-info ./__pycache__".split(" ")
+
+ for path_spec in files_to_clean:
+ # Make paths absolute and relative to this path
+ abs_paths = glob.glob(
+ os.path.normpath(os.path.join(here, path_spec))
+ )
+ for path in [str(p) for p in abs_paths]:
+ if not path.startswith(here):
+ # raise error if path in files_to_clean is absolute + outside
+ # this directory
+ msg = f"{path} is not a path around {here}"
+ raise ValueError(msg)
+ shutil.rmtree(path)
+
+
+# Mock setuptools into thinking that we are building a target binary on a host machine
+# so that the wheel gets tagged correctly. We can fuse the arm64 and amd64 wheels
+# together later using delocate.
+class HugoWheel(bdist_wheel):
+ """
+ A customised wheel build command that sets the platform tags to accommodate
+ the varieties of the GOARCH and GOOS environment variables when cross-compiling
+ the Hugo binary. Currently used for macOS arm64 and macOS x86_64.
+ """
+
+ def initialize_options(self):
+ super().initialize_options()
+
+ def finalize_options(self):
+ # plat_name is essentially the {platform tag} at the end of the wheel name.
+ # Note to self: the wheel name will look like this:
+ # {distribution}-{version}(-{build tag})?-{python tag}-{abi tag}-{platform tag}.whl
+ # # on macOS, if GOARCH is set to arm64 on an x86_64 machine, or if GOARCH is set to
+ # amd64 on an arm64 machine, we need to set the platform tag to macosx_X_Y_arm64 or
+ # macosx_X_Y_x86_64 respectively.
+ #
+ #
+ # TODO: FIXME: look at how Linux and Windows tags are set later
+
+ if sys.platform == "darwin":
+ platform_tag = get_platform("_")
+ # ensure correct platform tag for macOS arm64 and macOS x86_64
+ if "arm64" in platform_tag and os.environ.get("GOARCH") == "amd64":
+ self.plat_name = platform_tag.replace("arm64", "x86_64")
+ if "x86_64" in platform_tag and os.environ.get("GOARCH") == "arm64":
+ self.plat_name = platform_tag.replace("x86_64", "arm64")
+ super().finalize_options()
+
+ def run(self):
+ self.root_is_pure = False # ensure that the wheel is tagged as a binary wheel
+
+ self.run_command("clean") # clean the build directory before building the wheel
+
+ # ensure that the binary is copied into the binaries/ folder and then into the wheel.
+ hugo_binary = os.path.join(
+ os.path.dirname(__file__),
+ "python_hugo",
+ "binaries",
+ f"hugo-{HUGO_VERSION}-{HUGO_PLATFORM}-{os.environ.get('GOARCH', HUGO_ARCH)}"
+ + FILE_EXT,
+ )
+
+ # if the binary does not exist, then we need to build it, so invoke
+ # the build_ext command again and proceed to build the binary
+ if not os.path.exists(hugo_binary):
+ self.run_command("build_ext")
+
+ # now that the binary exists, we have ensured its presence in the wheel
+ super().run()
+
setup(
- ext_modules=[Extension(name="hugo.build", sources=["setup.py"])],
- cmdclass={"build_ext": HugoBuilder},
- packages=["python_hugo", "python_hugo.binaries"]
- if os.path.exists("python_hugo/binaries")
- else ["python_hugo"],
- # Include binary named hugo-HUGO_VERSION in the wheel, which is presently stored
- # in python_hugo/binaries (i.e., in a folder binaries/ inside the package directory)
- package_data={"python_hugo": ["binaries/*"]},
- # include_package_data=True,
- # TODO: data_files is deprecated for wheels, so we need to find a better way to
- # include the binary in the wheel
- data_files=[("binaries", [f"python_hugo/binaries/hugo-{HUGO_VERSION}" + FILE_EXT])],
- entry_points={"console_scripts": ["hugo=python_hugo.__init__:__call"]},
+ ext_modules=[
+ Extension(
+ name="hugo.build",
+ sources=[
+ f"python_hugo/binaries/hugo-{HUGO_VERSION}-{HUGO_PLATFORM}-{os.environ.get('GOARCH', HUGO_ARCH)}"
+ + FILE_EXT
+ ],
+ )
+ ],
+ cmdclass={
+ "build_ext": HugoBuilder,
+ "clean": Cleaner,
+ "bdist_wheel": HugoWheel,
+ },
+ packages=["python_hugo", "python_hugo.binaries"],
+ package_data={
+ "python_hugo": [
+ f"binaries/hugo-{HUGO_VERSION}-{HUGO_PLATFORM}-{os.environ.get('GOARCH', HUGO_ARCH)}"
+ + FILE_EXT
+ ]
+ },
+ include_package_data=True,
+ entry_points={"console_scripts": ["hugo=python_hugo.cli:__call"]},
+ # the package version, which is the same as the version
+ # of Hugo that it will build and provide
version=HUGO_VERSION,
)
| Doesn't work outside virtual environments and `--user` installations (have to use a virtual environment)
Currently, the Hugo executable is sourced from either the `python_hugo/binaries/` folder or the data files inside a virtual environment. Outside virtual environment installations (whether `--user` or not), the site-packages directory is structured differently and the data files are just put in `scripts/` in the Python installation directory, regardless of whether this is the system-wide site-packages directory or `Roaming\` on Windows.
Some ways to resolve this would be to inject the binary + adjust the ABI tag at the time of building the wheel or to use a different mechanism to include the binary in the wheel but exclude it from the SDist.
The logic to find the binary in the first place is also a bit of a hack because of this behaviour, since it walks up various parent directories, whereas it could simply locate the binary inside the `site-packages/` directories by means of `import site` or `importlib.resources` hooks.
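A minimal sketch of the `importlib.resources` approach mentioned above, assuming the binary ends up inside the package under `python_hugo/binaries/` (the layout the patch adopts); the version string is illustrative:

```python
from importlib import resources

def find_hugo(version: str = "0.120.4") -> str:
    # Resolves relative to the installed package, so it works the same for
    # virtualenv, --user and system-wide installs.
    return str(resources.files("python_hugo") / "binaries" / f"hugo-{version}")
```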
Investigate cross-compilation for macOS arm64
There might be options that the Go toolchain can use for this.
|
Note to self: `delocate-fuse` can be used to create universal wheels provided platform specifications can be set or overridden manually (might have to use CIBW_ARCHS_MACOS and check for it in `setup.py`) | 2024-01-04T18:18:03 | 0.0 | [] | [] |
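For the cross-compilation note above, the Go toolchain side mostly comes down to setting `GOOS`/`GOARCH` before building; a rough sketch only, with placeholder paths rather than the project's actual build invocation:

```python
import os
import subprocess

# Build an arm64 Hugo binary from an x86_64 macOS host (or vice versa).
# CGO_ENABLED=1 additionally requires a C compiler able to target arm64.
env = dict(os.environ, CGO_ENABLED="1", GOOS="darwin", GOARCH="arm64")
subprocess.check_call(["go", "build", "-o", "hugo-darwin-arm64", "."], env=env)
```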
||
ivre/ivre | ivre__ivre-1189 | 27858c8230d4cc687779b67f63e9b282c2e2db10 | diff --git a/ivre/db/__init__.py b/ivre/db/__init__.py
index 8b145c6a7c..cda679dd8d 100644
--- a/ivre/db/__init__.py
+++ b/ivre/db/__init__.py
@@ -2851,6 +2851,22 @@ def __init__(self):
const=False,
default=None,
)
+ self.argparser_insert = ArgumentParser(add_help=False)
+ self.argparser_insert.add_argument("--sensor", "-s", help="Sensor name")
+ self.argparser_insert.add_argument(
+ "--ignore-spec", "-i", help="Filename containing ignore rules"
+ )
+ self.argparser_insert.add_argument(
+ "--bulk",
+ action="store_true",
+ help="Use DB bulk inserts (this is the default)",
+ )
+ self.argparser_insert.add_argument(
+ "--local-bulk", action="store_true", help="Use local (memory) bulk inserts"
+ )
+ self.argparser_insert.add_argument(
+ "--no-bulk", action="store_true", help="Do not use bulk inserts"
+ )
def parse_args(self, args, flt=None):
flt = super().parse_args(args, flt=flt)
diff --git a/ivre/tools/airodump2db.py b/ivre/tools/airodump2db.py
index 62918e8c19..f00a72eaf1 100644
--- a/ivre/tools/airodump2db.py
+++ b/ivre/tools/airodump2db.py
@@ -127,18 +127,7 @@ def rec_iter(
def main() -> None:
"""Update the flow database from Airodump CSV files"""
- parser = ArgumentParser(description=__doc__)
- parser.add_argument("--sensor", "-s", help="Sensor name")
- parser.add_argument("--ignore-spec", "-i", help="Filename containing ignore rules")
- parser.add_argument(
- "--bulk", action="store_true", help="Use DB bulk inserts (this is the default)"
- )
- parser.add_argument(
- "--local-bulk", action="store_true", help="Use local (memory) bulk inserts"
- )
- parser.add_argument(
- "--no-bulk", action="store_true", help="Do not use bulk inserts"
- )
+ parser = ArgumentParser(description=__doc__, parents=[db.passive.argparser_insert])
parser.add_argument("files", nargs="*", metavar="FILE", help="Airodump CSV files")
args = parser.parse_args()
ignore_rules = _get_ignore_rules(args.ignore_spec)
diff --git a/ivre/tools/p0f2db.py b/ivre/tools/p0f2db.py
index 95a6d209f2..dfa90c03ad 100644
--- a/ivre/tools/p0f2db.py
+++ b/ivre/tools/p0f2db.py
@@ -96,18 +96,7 @@ def rec_iter(
def main() -> None:
"""Update the flow database from p0f log files"""
- parser = ArgumentParser(description=__doc__)
- parser.add_argument("--sensor", "-s", help="Sensor name")
- parser.add_argument("--ignore-spec", "-i", help="Filename containing ignore rules")
- parser.add_argument(
- "--bulk", action="store_true", help="Use DB bulk inserts (this is the default)"
- )
- parser.add_argument(
- "--local-bulk", action="store_true", help="Use local (memory) bulk inserts"
- )
- parser.add_argument(
- "--no-bulk", action="store_true", help="Do not use bulk inserts"
- )
+ parser = ArgumentParser(description=__doc__, parents=[db.passive.argparser_insert])
parser.add_argument("files", nargs="*", metavar="FILE", help="p0f log files")
args = parser.parse_args()
ignore_rules = _get_ignore_rules(args.ignore_spec)
diff --git a/ivre/tools/passiverecon2db.py b/ivre/tools/passiverecon2db.py
index 300d257ff4..3cfb9f6f65 100644
--- a/ivre/tools/passiverecon2db.py
+++ b/ivre/tools/passiverecon2db.py
@@ -27,7 +27,7 @@
from typing import Any, Dict, Generator, Iterable, List, Optional, Tuple
-import ivre.db
+from ivre.db import db, DBPassive
import ivre.passive
import ivre.parser.zeek
from ivre.types import Record
@@ -77,28 +77,17 @@ def rec_iter(
def main() -> None:
- parser = ArgumentParser(description=__doc__)
- parser.add_argument("--sensor", "-s", help="Sensor name")
- parser.add_argument("--ignore-spec", "-i", help="Filename containing ignore rules")
- parser.add_argument(
- "--bulk", action="store_true", help="Use DB bulk inserts (this is the default)"
- )
- parser.add_argument(
- "--local-bulk", action="store_true", help="Use local (memory) bulk inserts"
- )
- parser.add_argument(
- "--no-bulk", action="store_true", help="Do not use bulk inserts"
- )
+ parser = ArgumentParser(description=__doc__, parents=[db.passive.argparser_insert])
args = parser.parse_args()
ignore_rules = _get_ignore_rules(args.ignore_spec)
if (not (args.no_bulk or args.local_bulk)) or args.bulk:
- function = ivre.db.db.passive.insert_or_update_bulk
+ function = db.passive.insert_or_update_bulk
elif args.local_bulk:
- function = ivre.db.db.passive.insert_or_update_local_bulk
+ function = db.passive.insert_or_update_local_bulk
else:
function = functools.partial(
- ivre.db.DBPassive.insert_or_update_bulk,
- ivre.db.db.passive,
+ DBPassive.insert_or_update_bulk,
+ db.passive,
)
zeek_parser = ivre.parser.zeek.ZeekFile(sys.stdin.buffer)
function(
| Avoid code duplication in passive options
PR #1169 made it clear that parts of the code need to be factored out, such as the passive insertion options (e.g., `--sensor`, `--*bulk`, etc.).
| 2021-10-02T09:27:54 | 0.0 | [] | [] |
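The fix above uses argparse's parent-parser mechanism; in general the pattern looks like this (option names mirror the ones listed in the issue):

```python
from argparse import ArgumentParser

# Shared options are declared once, on a parser built with add_help=False...
common = ArgumentParser(add_help=False)
common.add_argument("--sensor", "-s", help="Sensor name")
common.add_argument("--bulk", action="store_true", help="Use DB bulk inserts")

# ...and every tool reuses them through `parents=`.
parser = ArgumentParser(description="some passive tool", parents=[common])
args = parser.parse_args(["--sensor", "lab", "--bulk"])
```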
|||
MLH-Fellowship/torchblaze | MLH-Fellowship__torchblaze-101 | 79f6207eed52c366cc272ace2f5f958db8778334 | diff --git a/setup.py b/setup.py
index f09c9f8..927ad57 100644
--- a/setup.py
+++ b/setup.py
@@ -22,7 +22,7 @@
setup (
name = 'torchblaze',
description = 'A CLI-based python package that provides a suite of functionalities to perform end-to-end ML using PyTorch.',
- version = '1.0.2',
+ version = '1.0.3',
packages = find_packages(), # list of all packages
install_requires = install_requires,
python_requires='>=3.7.0',
diff --git a/torchblaze/template.py b/torchblaze/template.py
index 77123a7..a784201 100644
--- a/torchblaze/template.py
+++ b/torchblaze/template.py
@@ -45,7 +45,7 @@ def startproject(project: str):
# same creation logic as the .gitignore file
f = os.path.join(root_dir, "tests.json")
with open(f, "w+") as writefile:
- writefile.writelines(pkg_resources.resource_string('torchblaze', 'template_files/tests.txt').decode('utf-8').split('\n'))
+ writefile.write(pkg_resources.resource_string('torchblaze', 'template_files/tests.txt').decode('utf-8'))
# same creation logic as the .gitignore file
| Bug fix in tests.json
Resolved the tests.json spacing issue in comments.
Bumped the version from v1.0.2 to v1.0.3
Please review and merge my PR
| 2021-02-26T19:49:50 | 0.0 | [] | [] |
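For context, the mechanism behind the fix above (illustrative snippet, not taken from the package): `writelines()` adds no line separators, so splitting the template text on `"\n"` first silently drops every newline, whereas `write()` keeps the text as-is.

```python
text = '{\n    "tests": []\n}'

with open("a.json", "w") as f:
    f.writelines(text.split("\n"))   # file contents: {    "tests": []}

with open("b.json", "w") as f:
    f.write(text)                    # file keeps its three lines
```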
|||
Azure/iotedgedev | Azure__iotedgedev-570 | 7a9af2db5626a3b5f2435d44a8b17288c81a4287 | diff --git a/iotedgedev/azurecli.py b/iotedgedev/azurecli.py
index 310e6cdf..f3005908 100644
--- a/iotedgedev/azurecli.py
+++ b/iotedgedev/azurecli.py
@@ -465,9 +465,16 @@ def create_iothub(self, value, resource_group, sku):
self.output.prompt(
"Creating IoT Hub. Please wait as this could take a few minutes to complete...")
- result = self.invoke_az_cli_outproc(["iot", "hub", "create", "--name", value, "--resource-group",
- resource_group, "--sku", sku, "--query", "[].{\"IoT Hub\":name}", "--out", "table"],
- f("Could not create the IoT Hub {value} in {resource_group} with sku {sku}."), stdout_io=io, stderr_io=error_io)
+ cmd = ["iot", "hub", "create", "--name", value, "--resource-group", resource_group, "--sku", sku, "--query", "[].{\"IoT Hub\":name}", "--out", "table"]
+ if sku == "F1":
+ cmd = cmd + ["--partition-count", "2"]
+ # the default partition-count is 4, but F1 allows only 2
+ # from `az iot hub create --help`:
+ # The partition count is the number of partitions that the IoT Hub uses to distribute messages.
+ # The default value is 4.
+ # In Azure Portal the default and only value is 2.
+
+ result = self.invoke_az_cli_outproc(cmd, f("Could not create the IoT Hub {value} in {resource_group} with sku {sku}."), stdout_io=io, stderr_io=error_io)
if not result and error_io.getvalue():
self.output.error(error_io.getvalue())
self.output.line()
| Running `iotedgedev iothub setup` with all default values results in error
<!-- Fill in the information needed -->
- iotedgedev Version: 3.3.4
- Python Version: 3.9.12
- Pip Version: 22.0.4
- Development machine OS Version: Windows WSL, iotedgedev devcontainer
- IoT Edge device OS Version: -
### Steps to Reproduce:
1. Not logged in into azure cli
3. run `iotedgedev iothub setup`
4. select subscription (ms internal subscription) // not sure if this made a difference
### Expected
Create iot hub with the selected default values
### Actual
```sh
=================================
======== SETTING IOT HUB ========
=================================
Setting IoT Hub to 'iotedgedev-iothub-d278b7'...
Checking if 'iotedgedev-iothub-d278b7' IoT Hub exists...
Could not locate the iotedgedev-iothub-d278b7 in iotedgedev-rg.
Creating 'iotedgedev-iothub-d278b7' in 'iotedgedev-rg' with 'F1' sku...
Creating IoT Hub. Please wait as this could take a few minutes to complete...
ERROR: Could not create the IoT Hub iotedgedev-iothub-d278b7 in iotedgedev-rg with sku F1.
ERROR: ERROR: (400016) Invalid PartitionCount 4 - value must be between 2 and 2. If you contact a support representative please include this correlation identifier: cf2e50d2-0ba3-4e2f-b10d-12e38678e6de, timestamp: 2022-04-14 09:43:45Z, errorcode: IH400016.
Code: 400016
Message: Invalid PartitionCount 4 - value must be between 2 and 2. If you contact a support representative please include this correlation identifier: cf2e50d2-0ba3-4e2f-b10d-12e38678e6de, timestamp: 2022-04-14 09:43:45Z, errorcode: IH400016.
Error: Could not create IoT Hub iotedgedev-iothub-d278b7 in iotedgedev-rg
Enter the IoT Hub Name (Creates a new IoT Hub if not found): [iotedgedev-iothub-d278b7]: ^[[A^[[B^[[B^[[B^[[B^[[B^[[B
```
| 2022-05-17T23:42:28 | 0.0 | [] | [] |
|||
makepath/mapshader | makepath__mapshader-89 | 2e61875aa84ab14ff7b82c873f8c3d798288d6c9 | diff --git a/mapshader/flask_app.py b/mapshader/flask_app.py
index 0237e56..f987ce2 100644
--- a/mapshader/flask_app.py
+++ b/mapshader/flask_app.py
@@ -7,7 +7,7 @@
from bokeh.tile_providers import STAMEN_TONER_BACKGROUND
from bokeh.tile_providers import get_provider
-from jinja2 import Template
+from jinja2 import Environment, FileSystemLoader
from bokeh.resources import INLINE
@@ -28,8 +28,9 @@
from mapshader.sources import MapSource
-from mapshader.utils import psutil_fetching, psutils_html
+from mapshader.utils import psutil_fetching
+jinja2_env = Environment(loader=FileSystemLoader("mapshader/templates/"))
def flask_to_tile(source: MapSource, z=0, x=0, y=0):
@@ -122,116 +123,22 @@ def build_previewer(service: MapService):
def service_page(service: MapService):
plot = build_previewer(service)
script, div = components(dict(preview=plot))
-
- template = Template(
- '''
- <!DOCTYPE html>
- <html lang="en">
- <head>
- <meta charset="utf-8">
- <link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto">
- <title>{{service.name}}</title>
- {{ resources }}
- {{ script }}
- <style>
- .embed-wrapper {
- display: flex;
- justify-content: space-evenly;
- }
- body {
- font-family: "Roboto", sans-serif;
- }
- .header {
- padding: 10px;
- }
- </style>
- </head>
- <body>
- {{ psutils_html }}
- <div class="header">
- <h3>{{service.name}}</h3>
- <hr />
- <h5><strong>Client URL:</strong>
- {{service.client_url}}
- </h5>
- <h5><strong>Description:</strong>
- {{service.source.description}}
- </h5>
- <h5><strong>Geometry Type:</strong>
- {{service.source.geometry_type.capitalize()}}
- </h5>
- </div>
- <hr />
- <div class="embed-wrapper">
- {% for key in div.keys() %}
- {{ div[key] }}
- {% endfor %}
- </div>
- <hr />
- <div class="header">
- <h4>Details</h4>
- <hr />
- <h5>
- <strong>
- Data Path:
- </strong>
- {{service.source.filepath}}
- </h5>
- <h5>
- <strong>
- Span:
- </strong>
- {{service.source.span}}
- </h5>
- <h5>
- <strong>
- Overviews:
- </strong>
- {{service.source.overviews.keys()}}
- </h5>
- <h5>
- <strong>
- Aggregation Method:
- </strong>
- {{service.source.agg_func}}
- </h5>
- <h5>
- <strong>
- Colormap Interpolation Method:
- </strong>
- {{service.source.shade_how}}
- </h5>
- </div>
- </body>
- </html>
- '''
- )
+ template = jinja2_env.get_template("service_page.html")
resources = INLINE.render()
html = template.render(resources=resources,
script=script,
service=service,
len=len,
- div=div,
- psutils_html=psutils_html())
+ div=div)
return html
def index_page(services):
- links = []
- for s in services:
- links.append(f'<li><a href="{s.service_page_url}">{s.name}</a></li>')
-
- html = '<html>'
- html += '<body>'
- html += '<ul>'
- html += ''.join(links)
- html += '</ul>'
- html += '</body>'
- html += '</html>'
+ template = jinja2_env.get_template('index_page.html')
- return html
+ return template.render(services=services)
def configure_app(app: Flask, user_source_filepath=None, contains=None):
diff --git a/mapshader/templates/index_page.html b/mapshader/templates/index_page.html
new file mode 100644
index 0000000..4932920
--- /dev/null
+++ b/mapshader/templates/index_page.html
@@ -0,0 +1,10 @@
+<html>
+ <body>
+ {% include 'psutils.html' %}
+ <ul>
+ {%- for s in services %}
+ <li><a href="{{s.service_page_url}}">{{s.name}}</a></li>
+ {% endfor %}
+ </ul>
+ </body>
+</html>
diff --git a/mapshader/templates/psutils.html b/mapshader/templates/psutils.html
new file mode 100644
index 0000000..f4b70be
--- /dev/null
+++ b/mapshader/templates/psutils.html
@@ -0,0 +1,134 @@
+<style>
+ #psutils {
+ display: flex;
+ gap: 8px;
+ }
+
+ .bar-main-container {
+ width: 300px;
+ height: 24px;
+ border-radius: 4px;
+ font-family: sans-serif;
+ font-weight: normal;
+ font-size: 0.7em;
+ color: rgb(64, 64, 64);
+ }
+
+ .wrap {
+ padding: 0 8px;
+ position: relative;
+ }
+
+ .bar-text {
+ width: calc(100% - 14px);
+ position: absolute;
+ display: flex;
+ justify-content: center;
+ top: 4.5px;
+ }
+
+ .bar-container {
+ float: right;
+ border-radius: 10px;
+ height: 10px;
+ background: rgba(0, 0, 0, 0.13);
+ width: 100%;
+ margin: 7px 0px;
+ overflow: hidden;
+ }
+
+ .bar {
+ float: left;
+ background: #ffffffd1;
+ height: 100%;
+ border-radius: 10px 0px 0px 10px;
+ opacity: 1;
+ transition: width 0.1s;
+ width: 0%;
+ }
+
+ /* COLORS */
+ .azure {
+ background: #38b1cc;
+ }
+ .emerald {
+ background: #2cb299;
+ }
+ .violet {
+ background: #8e5d9f;
+ }
+ .yellow {
+ background: #efc32f;
+ }
+ .red {
+ background: #e44c41;
+ }
+</style>
+<div id="psutils">
+ <div class="bar-main-container azure">
+ <div class="wrap">
+ <span class="bar-text">
+ <span>CPU: </span>
+ <span id="cpu-percentage">0,0%</span>
+ </span>
+ <div class="bar-container">
+ <div id="cpu-percentage-bar" class="bar"></div>
+ </div>
+ </div>
+ </div>
+ <div class="bar-main-container violet">
+ <div class="wrap">
+ <span class="bar-text">
+ <span>MEMORY: </span>
+ <span id="memory-percentage">0,0%</span>
+ </span>
+ <div class="bar-container">
+ <div id="memory-percentage-bar" class="bar"></div>
+ </div>
+ </div>
+ </div>
+ <div class="bar-main-container yellow">
+ <div class="wrap">
+ <span class="bar-text">
+ <span>DISK: </span>
+ <span id="disk-percentage">0,0%</span>
+ </span>
+ <div class="bar-container">
+ <div id="disk-percentage-bar" class="bar"></div>
+ </div>
+ </div>
+ </div>
+</div>
+<script>
+ const fetchAndPopulate = async () => {
+ const data = await fetch("http://localhost:5000/psutil");
+ const log = await data.json();
+
+ document.getElementById(
+ "cpu-percentage"
+ ).innerText = `${log.cpu.cpu_usage_percentage}%`;
+
+ document.getElementById(
+ "cpu-percentage-bar"
+ ).style.width = `${log.cpu.cpu_usage_percentage}%`;
+
+ document.getElementById(
+ "memory-percentage"
+ ).innerText = `${log.memory.percent}%`;
+
+ document.getElementById(
+ "memory-percentage-bar"
+ ).style.width = `${log.memory.percent}%`;
+
+ document.getElementById(
+ "disk-percentage"
+ ).innerText = `${log.disk.percent}%`;
+
+ document.getElementById(
+ "disk-percentage-bar"
+ ).style.width = `${log.disk.percent}%`;
+ };
+ fetchAndPopulate();
+
+ setInterval(fetchAndPopulate, 2000);
+</script>
diff --git a/mapshader/templates/service_page.html b/mapshader/templates/service_page.html
new file mode 100644
index 0000000..815e0e9
--- /dev/null
+++ b/mapshader/templates/service_page.html
@@ -0,0 +1,73 @@
+<!DOCTYPE html>
+<html lang="en">
+ <head>
+ <meta charset="utf-8" />
+ <link
+ rel="stylesheet"
+ href="https://fonts.googleapis.com/css?family=Roboto"
+ />
+ <title>{{service.name}}</title>
+ {{ resources }} {{ script }}
+ <style>
+ .embed-wrapper {
+ display: flex;
+ justify-content: space-evenly;
+ }
+ body {
+ font-family: "Roboto", sans-serif;
+ }
+ .header {
+ padding: 10px;
+ }
+ </style>
+ </head>
+ <body>
+ {% include "psutils.html" %}
+
+ <div class="header">
+ <h3>{{service.name}}</h3>
+ <hr />
+ <h5>
+ <strong>Client URL:</strong>
+ {{service.client_url}}
+ </h5>
+ <h5>
+ <strong>Description:</strong>
+ {{service.source.description}}
+ </h5>
+ <h5>
+ <strong>Geometry Type:</strong>
+ {{service.source.geometry_type.capitalize()}}
+ </h5>
+ </div>
+ <hr />
+ <div class="embed-wrapper">
+ {% for key in div.keys() %} {{ div[key] }} {% endfor %}
+ </div>
+ <hr />
+ <div class="header">
+ <h4>Details</h4>
+ <hr />
+ <h5>
+ <strong> Data Path: </strong>
+ {{service.source.filepath}}
+ </h5>
+ <h5>
+ <strong> Span: </strong>
+ {{service.source.span}}
+ </h5>
+ <h5>
+ <strong> Overviews: </strong>
+ {{service.source.overviews.keys()}}
+ </h5>
+ <h5>
+ <strong> Aggregation Method: </strong>
+ {{service.source.agg_func}}
+ </h5>
+ <h5>
+ <strong> Colormap Interpolation Method: </strong>
+ {{service.source.shade_how}}
+ </h5>
+ </div>
+ </body>
+</html>
diff --git a/mapshader/utils.py b/mapshader/utils.py
index 7f69c57..ddee6de 100644
--- a/mapshader/utils.py
+++ b/mapshader/utils.py
@@ -65,141 +65,3 @@ def psutil_fetching():
}
return log
-
-def psutils_html():
- return '''
- <style>
- #psutils {
- display: flex;
- gap: 8px;
- }
-
- .bar-main-container {
- width: 300px;
- height: 24px;
- border-radius: 4px;
- font-family: sans-serif;
- font-weight: normal;
- font-size: 0.7em;
- color: rgb(64, 64, 64);
- }
-
- .wrap {
- padding: 0 8px;
- position: relative;
- }
-
- .bar-text {
- width: calc(100% - 14px);
- position: absolute;
- display: flex;
- justify-content: center;
- top: 4.5px;
- }
-
- .bar-container {
- float: right;
- border-radius: 10px;
- height: 10px;
- background: rgba(0, 0, 0, 0.13);
- width: 100%;
- margin: 7px 0px;
- overflow: hidden;
- }
-
- .bar {
- float: left;
- background: #ffffffd1;
- height: 100%;
- border-radius: 10px 0px 0px 10px;
- opacity: 1;
- transition: width 0.1s;
- width: 0%;
- }
-
- /* COLORS */
- .azure {
- background: #38b1cc;
- }
- .emerald {
- background: #2cb299;
- }
- .violet {
- background: #8e5d9f;
- }
- .yellow {
- background: #efc32f;
- }
- .red {
- background: #e44c41;
- }
- </style>
- <div id="psutils">
- <div class="bar-main-container azure">
- <div class="wrap">
- <span class="bar-text">
- <span>CPU: </span>
- <span id="cpu-percentage">0,0%</span>
- </span>
- <div class="bar-container">
- <div id="cpu-percentage-bar" class="bar"></div>
- </div>
- </div>
- </div>
- <div class="bar-main-container violet">
- <div class="wrap">
- <span class="bar-text">
- <span>MEMORY: </span>
- <span id="memory-percentage">0,0%</span>
- </span>
- <div class="bar-container">
- <div id="memory-percentage-bar" class="bar"></div>
- </div>
- </div>
- </div>
- <div class="bar-main-container yellow">
- <div class="wrap">
- <span class="bar-text">
- <span>DISK: </span>
- <span id="disk-percentage">0,0%</span>
- </span>
- <div class="bar-container">
- <div id="disk-percentage-bar" class="bar"></div>
- </div>
- </div>
- </div>
- </div>
- <script>
- const fetchAndPopulate = async () => {
- const data = await fetch("http://localhost:5000/psutil");
- const log = await data.json();
-
- document.getElementById(
- "cpu-percentage"
- ).innerText = `${log.cpu.cpu_usage_percentage}%`;
-
- document.getElementById(
- "cpu-percentage-bar"
- ).style.width = `${log.cpu.cpu_usage_percentage}%`;
-
- document.getElementById(
- "memory-percentage"
- ).innerText = `${log.memory.percent}%`;
-
- document.getElementById(
- "memory-percentage-bar"
- ).style.width = `${log.memory.percent}%`;
-
- document.getElementById(
- "disk-percentage"
- ).innerText = `${log.disk.percent}%`;
-
- document.getElementById(
- "disk-percentage-bar"
- ).style.width = `${log.disk.percent}%`;
- };
- fetchAndPopulate();
-
- setInterval(fetchAndPopulate, 2000);
- </script>
- '''
| Move hardcoded HTML into HTML template files
- [x] mapshader/flask_app.py
- [x] mapshader/utils.py
| 2021-06-08T00:27:40 | 0.0 | [] | [] |
|||
auth0/auth0-python | auth0__auth0-python-283 | e24356fdc04fe0130ccaa94fc034ef92e6dbdad3 | diff --git a/auth0/v3/authentication/token_verifier.py b/auth0/v3/authentication/token_verifier.py
index 82d8321d..7e74b693 100644
--- a/auth0/v3/authentication/token_verifier.py
+++ b/auth0/v3/authentication/token_verifier.py
@@ -1,3 +1,4 @@
+"""Token Verifier module"""
import json
import time
@@ -8,6 +9,13 @@
class SignatureVerifier(object):
+ """Abstract class that will verify a given JSON web token's signature
+ using the key fetched internally given its key id.
+
+ Args:
+ algorithm (str): The expected signing algorithm (e.g. RS256).
+ """
+
DISABLE_JWT_CHECKS = {
"verify_signature": True,
"verify_exp": False,
@@ -20,42 +28,33 @@ class SignatureVerifier(object):
"require_nbf": False,
}
- """Abstract class that will verify a given JSON web token's signature
- using the key fetched internally given its key id.
-
- Args:
- algorithm (str): The expected signing algorithm (e.g. RS256).
- """
-
def __init__(self, algorithm):
if not algorithm or type(algorithm) != str:
raise ValueError("algorithm must be specified.")
self._algorithm = algorithm
- """Obtains the key associated to the given key id.
- Must be implemented by subclasses.
+ def _fetch_key(self, key_id=None):
+ """Obtains the key associated to the given key id.
+ Must be implemented by subclasses.
- Args:
- key_id (str, optional): The id of the key to fetch.
-
- Returns:
- the key to use for verifying a cryptographic signature
- """
+ Args:
+ key_id (str, optional): The id of the key to fetch.
- def _fetch_key(self, key_id=None):
+ Returns:
+ the key to use for verifying a cryptographic signature
+ """
raise NotImplementedError
- """Verifies the signature of the given JSON web token.
+ def verify_signature(self, token):
+ """Verifies the signature of the given JSON web token.
- Args:
- token (str): The JWT to get its signature verified.
+ Args:
+ token (str): The JWT to get its signature verified.
- Raises:
- TokenValidationError: if the token cannot be decoded, the algorithm is invalid
- or the token's signature doesn't match the calculated one.
- """
-
- def verify_signature(self, token):
+ Raises:
+ TokenValidationError: if the token cannot be decoded, the algorithm is invalid
+ or the token's signature doesn't match the calculated one.
+ """
try:
header = jwt.get_unverified_header(token)
except jwt.exceptions.DecodeError:
@@ -111,8 +110,6 @@ def _fetch_key(self, key_id=None):
class JwksFetcher(object):
- CACHE_TTL = 600 # 10 min cache lifetime
-
"""Class that fetches and holds a JSON web key set.
This class makes use of an in-memory cache. For it to work properly, define this instance once and re-use it.
@@ -121,6 +118,8 @@ class JwksFetcher(object):
cache_ttl (str, optional): The lifetime of the JWK set cache in seconds. Defaults to 600 seconds.
"""
+ CACHE_TTL = 600 # 10 min cache lifetime
+
def __init__(self, jwks_url, cache_ttl=CACHE_TTL):
self._jwks_url = jwks_url
self._init_cache(cache_ttl)
@@ -132,15 +131,14 @@ def _init_cache(self, cache_ttl):
self._cache_ttl = cache_ttl
self._cache_is_fresh = False
- """Attempts to obtain the JWK set from the cache, as long as it's still valid.
- When not, it will perform a network request to the jwks_url to obtain a fresh result
- and update the cache value with it.
-
- Args:
- force (bool, optional): whether to ignore the cache and force a network request or not. Defaults to False.
- """
-
def _fetch_jwks(self, force=False):
+ """Attempts to obtain the JWK set from the cache, as long as it's still valid.
+ When not, it will perform a network request to the jwks_url to obtain a fresh result
+ and update the cache value with it.
+
+ Args:
+ force (bool, optional): whether to ignore the cache and force a network request or not. Defaults to False.
+ """
has_expired = self._cache_date + self._cache_ttl < time.time()
if not force and not has_expired:
@@ -160,11 +158,11 @@ def _fetch_jwks(self, force=False):
self._cache_date = time.time()
return self._cache_value
- """Converts a JWK string representation into a binary certificate in PEM format.
- """
-
@staticmethod
def _parse_jwks(jwks):
+ """
+ Converts a JWK string representation into a binary certificate in PEM format.
+ """
keys = {}
for key in jwks['keys']:
@@ -174,19 +172,19 @@ def _parse_jwks(jwks):
keys[key["kid"]] = rsa_key
return keys
- """Obtains the JWK associated with the given key id.
- Args:
- key_id (str): The id of the key to fetch.
+ def get_key(self, key_id):
+ """Obtains the JWK associated with the given key id.
- Returns:
- the JWK associated with the given key id.
-
- Raises:
- TokenValidationError: when a key with that id cannot be found
- """
+ Args:
+ key_id (str): The id of the key to fetch.
- def get_key(self, key_id):
+ Returns:
+ the JWK associated with the given key id.
+
+ Raises:
+ TokenValidationError: when a key with that id cannot be found
+ """
keys = self._fetch_jwks()
if keys and key_id in keys:
@@ -221,21 +219,21 @@ def __init__(self, signature_verifier, issuer, audience, leeway=0):
self._sv = signature_verifier
self._clock = None # visible for testing
- """Attempts to verify the given ID token, following the steps defined in the OpenID Connect spec.
+ def verify(self, token, nonce=None, max_age=None, organization=None):
+ """Attempts to verify the given ID token, following the steps defined in the OpenID Connect spec.
- Args:
- token (str): The JWT to verify.
- nonce (str, optional): The nonce value sent during authentication.
- max_age (int, optional): The max_age value sent during authentication.
- organization (str, optional): The expected organization ID (org_id) claim value. This should be specified
+ Args:
+ token (str): The JWT to verify.
+ nonce (str, optional): The nonce value sent during authentication.
+ max_age (int, optional): The max_age value sent during authentication.
+ organization (str, optional): The expected organization ID (org_id) claim value. This should be specified
when logging in to an organization.
-
- Raises:
- TokenValidationError: when the token cannot be decoded, the token signing algorithm is not the expected one,
- the token signature is invalid or the token has a claim missing or with unexpected value.
- """
- def verify(self, token, nonce=None, max_age=None, organization=None):
+ Raises:
+ TokenValidationError: when the token cannot be decoded, the token signing algorithm is not the expected one,
+ the token signature is invalid or the token has a claim missing or with unexpected value.
+ """
+
# Verify token presence
if not token or not isinstance(token, str):
raise TokenValidationError("ID token is required but missing.")
| Token Verifier documentation not showing in docs
<img width="485" alt="Screen Shot 2021-08-13 at 09 27 13" src="https://user-images.githubusercontent.com/6595551/129356909-508c45e5-e0d2-48ba-916b-dd9430418069.png">
As you can see in the image, the Token Verifier docs aren't showing. I believe it's just because the docstrings are misplaced in the class code; I'll open a PR for that ;)
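For context, Sphinx only picks up a docstring when it is the first statement inside the method body, not a string literal placed above the `def`; a trimmed sketch of the intended layout (illustrative, not the full class):

```python
class SignatureVerifier(object):
    def verify_signature(self, token):
        """Verifies the signature of the given JSON web token."""
        # ... the actual verification logic goes here ...
```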
| 2021-08-13T12:56:30 | 0.0 | [] | [] |
|||
iqmo-org/magic_duckdb | iqmo-org__magic_duckdb-29 | 88cf528cca5cc551984b8829617cd768fbf8bfb6 | diff --git a/magic_duckdb/duckdb_mode.py b/magic_duckdb/duckdb_mode.py
index 0ad811f..3abc9cd 100644
--- a/magic_duckdb/duckdb_mode.py
+++ b/magic_duckdb/duckdb_mode.py
@@ -46,7 +46,10 @@ class DuckDbMode:
]
def default_connection(self) -> duckdb.DuckDBPyConnection:
- return duckdb.default_connection
+ if isinstance(duckdb.default_connection, duckdb.DuckDBPyConnection):
+ return duckdb.default_connection
+ else:
+ return duckdb.default_connection() # https://github.com/duckdb/duckdb/pull/13442 changed from property to function
def connect(self, conn_string: str) -> duckdb.DuckDBPyConnection:
return duckdb.connect(conn_string)
| default_connection is no longer an attribute, but a method
Support https://github.com/duckdb/duckdb/pull/13442, which changed the default connection from a property to a method.
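A minimal caller-side sketch of the required compatibility check (uses only the public duckdb API; treat it as illustrative):

```python
import duckdb

# Older releases expose `default_connection` as a property (a connection object);
# newer ones expose it as a callable, so detect which one we got.
conn = duckdb.default_connection
if not isinstance(conn, duckdb.DuckDBPyConnection):
    conn = conn()
print(conn.execute("SELECT 42").fetchall())
```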
| 2024-11-13T12:42:48 | 0.0 | [] | [] |
|||
freelawproject/juriscraper | freelawproject__juriscraper-1127 | 3c5a164469641c95b65fd37792c7ea950a6b294a | diff --git a/juriscraper/opinions/united_states/federal_appellate/scotus_slip.py b/juriscraper/opinions/united_states/federal_appellate/scotus_slip.py
index a0a1b2ad1..7a5a3861f 100644
--- a/juriscraper/opinions/united_states/federal_appellate/scotus_slip.py
+++ b/juriscraper/opinions/united_states/federal_appellate/scotus_slip.py
@@ -2,8 +2,10 @@
Court Contact: https://www.supremecourt.gov/contact/contact_webmaster.aspx
"""
-from datetime import date
+from datetime import date, datetime
+from typing import Dict, List, Union
+from juriscraper.AbstractSite import logger
from juriscraper.OpinionSiteLinear import OpinionSiteLinear
@@ -27,38 +29,39 @@ class Site(OpinionSiteLinear):
"SS": "Sonia Sotomayor",
"T": "Clarence Thomas",
}
+ base_url = "https://www.supremecourt.gov/opinions/slipopinion"
+ first_opinion_date = datetime(2018, 6, 25)
+ days_interval = 365
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.court_id = self.__module__
- self.yy = self._get_current_term()
self.status = "Published"
- self.url_base = "https://www.supremecourt.gov/opinions"
- self.precedential = "Published"
- self.court = "slipopinion"
- self.url = f"{self.url_base}/{self.court}/{self.yy}"
+ self.url = f"{self.base_url}/{self.get_term()}"
+ self.make_backscrape_iterable(kwargs)
@staticmethod
- def _get_current_term():
+ def get_term(
+ date_of_interest: Union[date, datetime] = date.today()
+ ) -> int:
"""The URLs for SCOTUS correspond to the term, not the calendar.
The terms kick off on the first Monday of October, so we use October 1st
as our cut off date.
"""
- today = date.today()
- term_cutoff = date(today.year, 10, 1)
- if today < term_cutoff:
- # Haven't hit the cutoff, return previous year.
- return int(today.strftime("%y")) - 1 # y3k bug!
- else:
- return today.strftime("%y")
+ term_cutoff = date(date_of_interest.year, 10, 1)
+ if isinstance(date_of_interest, datetime):
+ date_of_interest = date_of_interest.date()
+ year = int(date_of_interest.strftime("%y"))
+ # Return the previous year if we haven't reached the cutoff
+ return year - 1 if date_of_interest < term_cutoff else year
def _process_html(self):
for row in self.html.xpath("//tr"):
cells = row.xpath(".//td")
if len(cells) != 6:
continue
- a, date, docket, link, justice, citation = row.xpath(".//td")
+ _, date, docket, link, justice, citation = row.xpath(".//td")
if not link.text_content():
continue
self.cases.append(
@@ -71,3 +74,27 @@ def _process_html(self):
"judge": self.justices[justice.text_content()],
}
)
+
+ def make_backscrape_iterable(self, kwargs: Dict) -> List[str]:
+ """Use the default make_backscrape_iterable to parse input
+ and create date objects. Then, use the dates to get the terms
+
+ Note that the HTML slipopinion page exists only since term 17
+
+ :return: a list of URLs
+ """
+ super().make_backscrape_iterable(kwargs)
+ start = self.get_term(self.back_scrape_iterable[0][0])
+ end = self.get_term(self.back_scrape_iterable[-1][1])
+ if start == end:
+ self.back_scrape_iterable = [f"{self.base_url}/{start}"]
+ else:
+ self.back_scrape_iterable = [
+ f"{self.base_url}/{yy}" for yy in range(start, end)
+ ]
+
+ def _download_backwards(self, d: str):
+ self.url = d
+ logger.info("Backscraping %s", self.url)
+ self.html = self._download()
+ self._process_html()
| Get citations using scrapers
I'm going to workshop my thoughts on prioritization here - and welcome feedback and thoughts.
| @grossir can you please add your suggestion for using back scrapers to collect citations or other material posted later.
Sure @flooie
This would work for sources where:
1. The HTML pages have a "citation" column
2. The court leaves it as a placeholder for some time, until it populates it
An example is `md`; compare these 2 images from 2023 and 2024 (the current year), where citations are not populated yet:


The approach is to run the backscraper with a custom caller. Here is some pseudocode
```python
from juriscraper.opinions.united_states.state import md as scraper_module
from juriscraper.lib.importer import site_yielder
from cl.search.models import Opinion, OpinionCluster
from cl.scrapers.management.commands.cl_scrape_opinions import make_citation
import logging
logger = logging.getLogger(__name__)
class CitationCollector:
def scrape_citations(self, start_date, end_date):
for site in site_yielder(
scraper_module.Site(
backscrape_start=start_date,
backscrape_end=end_date,
).back_scrape_iterable,
scraper_module,
):
# get case dicts by parsing HTML
site.parse()
court_id = scraper_module.court_id.split("/")[-1].split("_")[0]
for record in site:
citation = record['citations']
if not citation:
continue
# get cluster using download_url or hash of the document
cluster = Opinion.objects.get(download_url=record['download_urls']).cluster
# check if citation exists
if self.citation_exists(citation, cluster):
logger.info("Citation already exists '%s' for cluster %s", record['citations'], cluster.id)
continue
citation = make_citation(citation, cluster, court_id)
citation.save()
def citation_exists(self, citation, cluster):
"""To implement"""
return False
```
Simple enough. Is it a good idea to analyze this across all states to figure out:
- Which have it
- How delayed each is
- How far back each goes
- How difficult each is to scrape
- ?
Thank you guys.
Also, should we spin this off into its own ticket and task? My hope was to use this issue to discuss the high-level architecture of a new Juriscraper system, not features we want to add.
I have a spreadsheet that looked at each state and where these citations could be pulled from. In many cases the citations appear later on the scrapers, and in others there is a second cite that could be scraped. The two are probably Lexis or West cites that could be scraped (maybe).
https://docs.google.com/spreadsheets/d/1zYP_4ivL2XQF8mlrgdTmzXB57sTn6UYv8GrrRkq7X5Q/edit?usp=sharing
STATE CITES|COUNT
-- | --
YES | 27
PROBABLE | 2
UNCLEAR | 6
NO | 16
10 with neutral citations
That's not too bad! Let's keep filling this in with info about how far back each goes, and things like that.
Yes, but I think many of these links are unrelated to the current scrapers, so it's more of a jumping-off point for this. | 2024-08-20T21:29:15 | 0.0 | [] | []
||
SafeAILab/EAGLE | SafeAILab__EAGLE-69 | fe31550f69fb12c85070d7c23d552bd8f64ecc8d | diff --git a/eagle/model/modeling_llama_kv.py b/eagle/model/modeling_llama_kv.py
index fa4805a..10d7079 100644
--- a/eagle/model/modeling_llama_kv.py
+++ b/eagle/model/modeling_llama_kv.py
@@ -724,12 +724,26 @@ def forward(
)
hidden_states = residual + hidden_states
+ if hidden_states.dtype == torch.float16:
+ clamp_value = torch.where(
+ torch.isinf(hidden_states).any(),
+ torch.finfo(hidden_states.dtype).max - 1000,
+ torch.finfo(hidden_states.dtype).max,
+ )
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
+ if hidden_states.dtype == torch.float16:
+ clamp_value = torch.where(
+ torch.isinf(hidden_states).any(),
+ torch.finfo(hidden_states.dtype).max - 1000,
+ torch.finfo(hidden_states.dtype).max,
+ )
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
| RuntimeError: probability tensor contains either `inf`, `nan` or element < 0
Great work!
I tried your [example](https://github.com/SafeAILab/EAGLE#:~:text=llama%2D2%2Dchat%5D-,With%20Code,-You%20can%20use) for llama-7b-chat and changed the tree structure in choices.py into `baseline = [[0], [0, 0], [0, 0, 0], [0, 0, 0, 0]]` to simulate speculative decoding without a token tree, and got the error above. Any clues?
log attached:

| This seems to be an issue with the original LLaMA2-Chat encountering NaN. What was your input?
> This seems to be an issue with the original LLaMA2-Chat encountering NaN. What was your input?
Simply "Hello". @Liyuhui-12
I can run this code normally. Did you use the correct weights for LLaMA2-Chat? It should be meta-llama/Llama-2-7b-chat-hf instead of meta-llama/Llama-2-7b-chat.
I got the tree shape wrong, my bad. Thanks!
What was wrong with your original tree shape @cyLi-Tiger? I am facing a similar issue | 2024-05-07T11:59:16 | 0.0 | [] | [] |
||
agriyakhetarpal/hugo-python-distributions | agriyakhetarpal__hugo-python-distributions-94 | 79be9309b7ef9974eea2fd3bca717a022ef9e876 | diff --git a/.github/workflows/cd.yml b/.github/workflows/cd.yml
index b4365a4..5ba14d9 100644
--- a/.github/workflows/cd.yml
+++ b/.github/workflows/cd.yml
@@ -61,6 +61,8 @@ jobs:
output-dir: wheelhouse
env:
CIBW_ARCHS_WINDOWS: AMD64
+ CIBW_BEFORE_BUILD_WINDOWS: "pip install delvewheel"
+ CIBW_REPAIR_WHEEL_COMMAND_WINDOWS: delvewheel repair -w {dest_dir} {wheel}
CIBW_TEST_COMMAND: >
hugo version
hugo env --logLevel debug
@@ -213,7 +215,6 @@ jobs:
output-dir: wheelhouse
env:
CIBW_ARCHS_MACOS: x86_64
- CIBW_REPAIR_WHEEL_COMMAND_MACOS: ''
CIBW_TEST_COMMAND: >
hugo version
hugo env --logLevel debug
@@ -242,7 +243,6 @@ jobs:
output-dir: wheelhouse
env:
CIBW_ARCHS_MACOS: arm64
- CIBW_REPAIR_WHEEL_COMMAND_MACOS: ''
CIBW_TEST_COMMAND: >
hugo version
hugo env --logLevel debug
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 28bc9aa..2bd0f23 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -194,6 +194,8 @@ jobs:
CXX: zig c++ -target aarch64-windows-gnu
CIBW_BUILD: "cp312-*"
CIBW_ARCHS_WINDOWS: ARM64
+ CIBW_BEFORE_BUILD_WINDOWS: "pip install delvewheel"
+ CIBW_REPAIR_WHEEL_COMMAND_WINDOWS: delvewheel repair -w {dest_dir} {wheel}
CIBW_TEST_SKIP: "*-win_arm64"
# Note: cibuildwheel will manage installing 32-bit Python on Windows. We
@@ -211,6 +213,8 @@ jobs:
CXX: zig c++ -target x86-windows-gnu
CIBW_BUILD: "cp312-*"
CIBW_ARCHS_WINDOWS: x86
+ CIBW_BEFORE_BUILD_WINDOWS: "pip install delvewheel"
+ CIBW_REPAIR_WHEEL_COMMAND_WINDOWS: delvewheel repair -w {dest_dir} {wheel}
CIBW_TEST_COMMAND: >
hugo version
hugo env --logLevel debug
diff --git a/setup.py b/setup.py
index a4034d8..01dbbed 100644
--- a/setup.py
+++ b/setup.py
@@ -194,6 +194,15 @@ def run(self):
f"-s -w -X github.com/gohugoio/hugo/common/hugo.vendorInfo={HUGO_VENDOR_NAME}"
]
+ # Build a static binary on Windows to avoid missing DLLs from MinGW,
+ # i.e., libgcc_s_seh-1.dll, libstdc++-6.dll, etc.
+ BUILDING_FOR_WINDOWS = (
+ os.environ.get("GOOS") == "windows" or sys.platform == "win32"
+ )
+
+ if BUILDING_FOR_WINDOWS:
+ ldflags.append("-extldflags '-static'")
+
if not (Path(HUGO_CACHE_DIR).resolve() / f"hugo-{HUGO_VERSION}").exists():
subprocess.check_call(
[
| Unable to create a new Hugo Project using Python
I created a Python virtual environment and installed the `hugo` static site generator using `pip install hugo`. However, when I try to use the `hugo` command in the terminal, I encounter an error and am unable to proceed. This prevents me from creating a Hugo project using Python.
**Steps to Reproduce:**
1. Create and activate a new Python virtual environment.
2. Install hugo `using pip install hugo`.
3. Run the command `hugo version`.
**Error Message:**
```
(.venv) PS P:\Project-Name> hugo version
Running Hugo 0.125.4 via hugo-python-distributions at P:\Project-Name\.venv\lib\site-packages\hugo\binaries\hugo-0.125.4-windows-amd64.exe
Traceback (most recent call last):
File "D:\Python310\lib\runpy.py", line 196, in _run_module_as_main
return _run_code(code, main_globals, None,
File "D:\Python310\lib\runpy.py", line 86, in _run_code
exec(code, run_globals)
File "P:\Project-Name\.venv\Scripts\hugo.exe\__main__.py", line 7, in <module>
File "P:\Project-Name\.venv\lib\site-packages\hugo\cli.py", line 68, in __call
check_call([hugo_executable(), *argv[1:]])
File "D:\Python310\lib\subprocess.py", line 369, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '['P:\\Project-Name\\.venv\\lib\\site-packages\\hugo\\binaries\\hugo-0.125.4-windows-amd64.exe', 'version']' returned non-zero exit status 3221225781.
```
**What I want to do:** Just create a static hugo project using Python.
| Hi, @MuhammadNYoussef! Thanks for filing this bug report - the first one by an external user. :)
I'm unsure why this happens, because I have been testing the wheels after I have built them since I started the project altogether.
Do the below links help?
https://stackoverflow.com/questions/49413443/trouble-debugging-error-with-exit-code-3221225781-missing-libraries-in-windows
https://stackoverflow.com/questions/60348640/docker-returning-exit-code-3221225781-installing-vc-redist-x64-exe
I think what is happening is that running the executable requires some version of the Windows UCRT and other dependencies, which is available on GitHub Actions runners, but probably not on regular Windows systems... I'll look further into where this comes from, but it looks like there are some missing DLLs that the program is linked against but that I haven't copied into the wheel.
Strange - no dependencies seem to be required:
```
$ pipx run delvewheel repair hugo-0.125.4-py3-none-win_amd64.whl
repairing hugo-0.125.4-py3-none-win_amd64.whl
finding DLL dependencies
no external dependencies are needed
wheel copied to /Users/agriyakhetarpal/wheelhouse/hugo-0.125.4-py3-none-win_amd64.whl
```
and so says `repairwheel`:
```
$ pipx run repairwheel -o . hugo-0.125.4-py3-none-win_amd64.whl
repairing /Users/agriyakhetarpal/hugo-0.125.4-py3-none-win_amd64.whl
finding DLL dependencies
no external dependencies are needed
wheel copied to /var/folders/b3/2bq1m1_50bs4c7305j8vxcqr0000gn/T/repairwheel5gi0wkbv/hugo-0.125.4-py3-none-win_amd64.whl
Wrote /Users/agriyakhetarpal/hugo-0.125.4-py3-none-win_amd64.whl
```
I will test on a Windows system as well, just to confirm nothing is being missed.
Found the bug - I had to compile a static binary with MinGW, of course. No one has ever reported this, and it wasn't caught before in CI. I'll be happy to fix that in the next release, or would you want me to issue a patch release? I don't think I'll be able to backport this to the several past releases, and therefore would lean towards fixing it in the next one, so, as a temporary workaround, you can install MinGW using the Chocolatey package manager (`choco install mingw`) after installing Chocolatey, and everything will then work.
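For the curious, the fix boils down to passing a static-link flag to the Go linker when targeting Windows, roughly like this self-contained sketch (variable names are illustrative, not the exact build script):

```python
import os
import sys

# Link statically on Windows so the produced hugo.exe does not depend on the
# MinGW runtime DLLs (libgcc_s_seh-1.dll, libstdc++-6.dll, ...).
ldflags = ["-s -w"]
if os.environ.get("GOOS") == "windows" or sys.platform == "win32":
    ldflags.append("-extldflags '-static'")
print(" ".join(ldflags))  # handed to `go build -ldflags "..."`
```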
Hey @agriyakhetarpal, Thanks a bunch for looking into my problem! The workaround you suggested is really helpful. I appreciate you taking the time to figure that out. Looking forward to a permanent fix, but in the meantime, this is a lifesaver! | 2024-04-29T10:48:23 | 0.0 | [] | [] |
||
ipeaGIT/geobr | ipeaGIT__geobr-323 | 691cb3bdeabb8d5379be8370afeab768b639ee87 | diff --git a/.github/workflows/Python-CMD-check.yaml b/.github/workflows/Python-CMD-check.yaml
new file mode 100644
index 00000000..d40b8d20
--- /dev/null
+++ b/.github/workflows/Python-CMD-check.yaml
@@ -0,0 +1,81 @@
+on: [push]
+
+name: Python-CMD-check
+
+jobs:
+ Python-CMD-check:
+ runs-on: ${{ matrix.os }}
+
+ name: ${{ matrix.os }} (${{ matrix.python-version }})
+
+ strategy:
+ fail-fast: false
+ matrix:
+ os: [ubuntu-latest, macOS-latest , windows-latest]
+ python-version: ["3.8", "3.9", "3.10", "3.11"]
+ # Python 3.8 support ends in 2024-10
+ # Python 3.12 support starts in 2023-10
+ # Check Python maintenance status at: https://www.python.org/downloads/
+
+ env:
+ GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }}
+
+ steps:
+ - name: Check out geobr
+ uses: actions/checkout@v3
+
+ - name: Setup Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v4
+ with:
+ python-version: ${{ matrix.python-version }}
+ cache: 'pip'
+
+ - name: Install dependencies (Ubuntu)
+ run: |
+ python -m pip install --upgrade pip
+ pip install -r requirements.txt
+ # Add the Ubuntu GIS PPA
+ sudo add-apt-repository ppa:ubuntugis/ppa
+ sudo apt update
+ # Install geos library (shapely requirement)
+ sudo apt-get install libgeos++-dev libgeos3.10.2 libgeos-c1v5 libgeos-dev libgeos-doc
+ if: matrix.os == 'ubuntu-latest'
+ working-directory: python-package
+
+ - name: Install dependencies (Windows)
+ run: |
+ python -m pip install --upgrade pip
+ pip install -r requirements.txt
+ if: matrix.os == 'windows-latest'
+ working-directory: python-package
+
+ - name: Install dependencies (MacOS)
+ run: |
+ python -m pip install --upgrade pip
+ brew install geos
+ pip install shapely --no-binary shapely
+ pip install -r requirements.txt
+ if: matrix.os == 'macOS-latest'
+ working-directory: python-package
+
+ - name: Save dependencies
+ run: pip freeze > requirements.txt
+
+ - name: Install testing dependencies
+ run: |
+ pip install pytest
+ pip install coverage
+
+ - name: Run tests
+ run: |
+ coverage run -m pytest -x ./tests
+ coverage report --omit="tests/*" -m >> test-results.txt
+ working-directory: python-package
+
+ - name: Upload check results
+ if: always()
+ uses: actions/upload-artifact@v3
+ with:
+ name: test-results
+ path: python-package/test-results.txt
+ if-no-files-found: warn
diff --git a/python-package/geobr/list_geobr.py b/python-package/geobr/list_geobr.py
index 36ee6238..1f6d48b0 100644
--- a/python-package/geobr/list_geobr.py
+++ b/python-package/geobr/list_geobr.py
@@ -1,3 +1,4 @@
+from requests import get
import pandas as pd
from urllib.error import HTTPError
@@ -16,7 +17,7 @@ def list_geobr():
"""
try:
- df = pd.read_html("https://github.com/ipeaGIT/geobr/blob/master/README.md")[1]
+ df = pd.read_html(get("https://github.com/ipeaGIT/geobr/blob/master/README.md").text)[1]
except HTTPError:
print(
diff --git a/python-package/requirements.txt b/python-package/requirements.txt
index c10b235a..c5e3d451 100644
--- a/python-package/requirements.txt
+++ b/python-package/requirements.txt
@@ -1,5 +1,9 @@
-geopandas==0.7.0
+geopandas==0.11.0
jinja2>=2.11.3
requests==2.20.0
-Shapely==1.7.0
+Shapely==1.8.1
+numpy>=1.22.4
+beautifulsoup4
lxml
+html5lib
+
| Create function tests for Python
It might be a good idea to use the [pytest testing system](http://doc.pytest.org/en/latest).
| Hello, I would be happy to help!
Pytest offers lots of features to improve testability like [fixtures](https://doc.pytest.org/en/latest/how-to/fixtures.html), [parameters decorators](https://doc.pytest.org/en/latest/how-to/parametrize.html) and [marks](https://doc.pytest.org/en/latest/how-to/mark.html).
Given that most tests are already written and use the default `assert` function (which integrates seamlessly with Pytest), I imagine that the suggestion is to modify the test structure in a manner such as:
* Split existing functions (with various `assert`s) into more specific ones with detailed parameters and error description/logs
Did I get it correctly? If not, what would be the desired improvements?
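For illustration, a hypothetical test along those lines, parametrized over years instead of stacking asserts (assumes geobr's `read_state` helper and its `code_state`/`year` parameters):

```python
import pytest
from geobr import read_state

@pytest.mark.parametrize("year", [2010, 2020])
def test_read_state_returns_features(year):
    # One focused assertion per case, run once per parametrized year.
    gdf = read_state(code_state="all", year=year)
    assert len(gdf) > 0
```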
Hi @vss-2 . Thanks for your help. Yes, we already have some tests. It would be great to integrate the Python tests with code coverage and github actions. This way, we would (1) have a better understanding of how much of geobr Python code is tested and, (2) more importantly, to have automated checks on commits and pull requests to check if they don't break the Python package.
I assume Pytest would be a simple way to do this, right?
Absolutely right, I hadn't noticed the Python package was missing test coverage.
Although I have limited experience with GitHub Actions, I am committed to implementing them in my geobr fork.
As soon as I successfully complete the setup (or need some help, hahah), I will notify here.
Hello, after extensively solving dependency issues,
I've successfully established this [stable workflow](https://github.com/vss-2/geobr/actions/runs/5941494684). Before pull-requesting I would like to highlight these two changes:
* I've updated [Shapely](https://pypi.org/project/shapely/1.2.18/#history) (1.7.0 -> 1.8.1) and [Geopandas](https://pypi.org/project/geopandas/#history) (0.7 -> 0.11.0) as they were outdated. This was causing broken dependencies across all operating systems (Windows, Linux, MacOS) on [Python 3.9 or above](https://github.com/vss-2/geobr/actions/runs/5940669320) but not in Python 3.8 (probably their last supported version);
* The `list_geobr()` function [here](https://github.com/ipeaGIT/geobr/blob/691cb3bdeabb8d5379be8370afeab768b639ee87/python-package/geobr/list_geobr.py#L19) uses [pd.read_html()](https://pandas.pydata.org/docs/reference/api/pandas.read_html.html), which requires BeautifulSoup4 and html5lib (they were also added to `requirements.txt`). Initially, this test was failing in all environments. It turns out (some error warned me) that the current implementation of BeautifulSoup doesn't fetch and download HTML files, so I've [modified it this way](https://github.com/vss-2/geobr/commit/f3a054a87b652eb8181b1814ea562a6e5f1a3db6) to maintain the expected behavior;
The current workflow ([Python-CMD-check](https://github.com/vss-2/geobr/blob/90019b9532a5fa38f94899096892d56f2c5e9916/.github/workflows/Python-CMD-check.yaml)) is configured to:
* Check `python-package/tests/` for every push event on any branch;
* Run test job on the latest versions of: Ubuntu, MacOS and Windows;
* For each OS: run tests using Python version 3.9 (*discuss further);
* Install specific dependencies (step made specifically because shapely sometimes requires OS-native GEOS lib);
* Install pip requirements.txt, save dependencies and start pytest/coverage;
* Upload test-results.txt containing coverage info to workflow page (currently we're at 88% total coverage 🥳);
I would like to know which other Python versions I should add to the testing workflow. My intention was to add versions from 3.8 to 3.11 (the currently maintained versions).
Hi @vss-2 . This looks great! thanks so much. This is a great contribution to make the Python version of geobr more robust. Regarding the versions of Python that should be tested, testing versions from 3.8 to 3.11 is fine. Please go ahead and open the PR. | 2023-08-28T15:14:07 | 0.0 | [] | [] |
||
kiwicampus/ros2_graph | kiwicampus__ros2_graph-17 | b4c8d5bf3f1998e9036c632d4c5bce2f3d808523 | diff --git a/ros2_graph/graph_generator.py b/ros2_graph/graph_generator.py
index a99cbb2..8d8dfd2 100644
--- a/ros2_graph/graph_generator.py
+++ b/ros2_graph/graph_generator.py
@@ -430,7 +430,7 @@ def get_node_graph(node, links_count):
node, action_servers, clients=True
)
mermaid_graph_description.extend(mermaid_list)
- links_count += links_count_sserver
+ links_count += links_count_aserver
action_links = list(range(start_action_links, links_count))
| [bug]: syntax error occurred when trying to view in vscode
## Steps to reproduce the error
1. generated the `turtle_diagram.md` file using
```bash
ros2_graph /turtlesim -o turtle_diagram.md
```
2. tried to view the markdown file in vscode via the [Markdown All in One](https://marketplace.visualstudio.com/items?itemName=yzhang.markdown-all-in-one) extension, and the following error popped up; it shows a syntax error

| Hi, as I can see, we use the same extension [Markdown Preview Mermaid Support](https://marketplace.visualstudio.com/items?itemName=bierner.markdown-mermaid), please verify its version.
> [Markdown Preview Mermaid Support](https://marketplace.visualstudio.com/items?itemName=bierner.markdown-mermaid), please verify its version.
I have `version 1.17.4` of the above mentioned extension installed and still I am getting the same issue.
<br>

@lonebots
I use the same extension and it works perfectly.
The only issue I got is when I want to manually change the graph (rename or delete "clock"); I have the following error:
`Cannot set properties of undefined (setting 'style')`
It's due to the last line:
`linkStyle 23,24 fill:none,stroke:green;`
That line changes the color of the arrows for actions and specifies the links by number.
Hence removing a link makes it break.
I removed the line because I don't use actions for now. And I was able to remove nodes.
> The only issue I got is when I want to manually change the graph (rename or delete "clock"); I have the following error: `Cannot set properties of undefined (setting 'style')`
>
> It's due to the last line: `linkStyle 23,24 fill:none,stroke:green;` That line changes the color of the arrows for actions and specifies the links by number.
>
> Hence removing a link makes it break.
>
> I removed the line because I don't use actions for now. And I was able to remove nodes.
@BriceRenaudeau would it be a nice feature to be able to exclude services/actions/topics from the graph by a flag?
It would be a good feature to have some flags (--actions --services --keys).
But in my opinion, this is not a priority as we can remove it manually from the text.
I would prefer a more readable file (comments, clean declarations, separated links) for easy adjustments.
In my case the "clock" topic is useless but some people may keep it.
You may even use a .yaml file to allow user-defined links ("===", "---" or "...")
> I use the same extension and it works perfectly.
Hi, I don't understand what's happening here; for me the error still prevails and I am not able to get the graphs in vscode, but the code generated by `ros2_graph` works flawlessly in the GitHub markdown preview.
> > I use the same extension and it works perfectly.
>
> Hi, I don't understand what's happening here; for me the error still prevails and I am not able to get the graphs in vscode, but the code generated by `ros2_graph` works flawlessly in the GitHub markdown preview.
There is something that worries me in your case, the error says that you are using Mermaid version 8.8.3 but the latest version of the vs code extension supports Mermaid version 9.3.0. Have you made any changes to the extension settings?
> Have you made any changes to the extension settings?
No, I haven't made any changes to the extension settings; anyway, I will check on that once more.
Actually, I tried to run a simple mermaid markdown and it worked for me.
I will share the markdown and the output down here:

New info.
It was working perfectly but now it doesn't. (same code)
- mermaid version 9.3.0
- Cannot set properties of undefined (setting 'style')
There is an issue with the **linkStyle** having a number out of range.
It happens when there are actions.
It's a wrong addition in the link count.
https://github.com/kiwicampus/ros2_graph/blob/b4c8d5bf3f1998e9036c632d4c5bce2f3d808523/ros2_graph/graph_generator.py#L433
It must be `links_count += links_count_aserver`,
adding action links and not service links. | 2023-02-07T16:34:13 | 0.0 | [] | []
||
ivre/ivre | ivre__ivre-1604 | 08eca7e6b0b8aa2e299540218a946e26add104bb | diff --git a/ivre/db/__init__.py b/ivre/db/__init__.py
index da5d684d50..ce2d2ee11d 100644
--- a/ivre/db/__init__.py
+++ b/ivre/db/__init__.py
@@ -2281,6 +2281,8 @@ def store_scan(self, fname, **kargs):
store_scan_function = self.store_scan_json_zdns_recursion
elif "resolver" in firstres:
store_scan_function = self.store_scan_json_dnsx
+ elif "host" in firstres:
+ store_scan_function = self.store_scan_json_dismap
else:
raise ValueError( # pylint: disable=raise-missing-from
f"Unknown file type {fname}"
@@ -3529,164 +3531,180 @@ def store_scan_json_dismap(
tags = []
self.start_store_hosts()
with utils.open_file(fname) as fdesc:
- try:
- data = json.load(fdesc)
- except (UnicodeDecodeError, json.JSONDecodeError):
- utils.LOGGER.error("Cannot read file %r", fname, exc_info=True)
+ fchar = fdesc.read(1)
+ with utils.open_file(fname) as fdesc:
+ if fchar == b"[":
+ try:
+ data = json.load(fdesc)
+ except (UnicodeDecodeError, json.JSONDecodeError):
+ utils.LOGGER.error("Cannot read file %r", fname, exc_info=True)
+ return False
+ elif fchar == b"{":
+ data = (json.loads(line) for line in fdesc)
+ else:
+ utils.LOGGER.error(
+ "Cannot read file %r, invalid start byte %r", fname, fchar
+ )
return False
- timestamp = str(datetime.fromtimestamp(os.stat(fname).st_mtime))
- tags = re.compile("\\[[^]]*\\]")
- http_split = re.compile(b"\r?\n\r?\n")
- http_hdr_split = re.compile(b"\r?\n")
- for rec in data:
- port = {
- "protocol": rec["type"],
- "port": rec["port"],
- "state_state": rec["status"],
- "state_reason": "response",
- }
- if port["protocol"] == "tls":
- port["protocol"] = "tcp"
- port["service_tunnel"] = "ssl"
- if rec.get("protocol"):
- port["service_name"] = rec["protocol"]
- if port["service_name"] == "https":
- port["service_name"] = "http"
+ timestamp = str(datetime.fromtimestamp(os.stat(fname).st_mtime))
+ res_tags = re.compile("\\[[^]]*\\]")
+ http_split = re.compile(b"\r?\n\r?\n")
+ http_hdr_split = re.compile(b"\r?\n")
+ for rec in data:
+ port = {
+ "protocol": rec["type"],
+ "port": rec["port"],
+ "state_state": rec["status"],
+ "state_reason": "response",
+ }
+ if port["protocol"] == "tls":
+ port["protocol"] = "tcp"
port["service_tunnel"] = "ssl"
- host = {
- "addr": rec["host"],
- "state": "up",
- "schema_version": xmlnmap.SCHEMA_VERSION,
- "starttime": timestamp,
- "endtime": timestamp,
- "ports": [port],
- }
- if rec.get("identify.bool"):
- tags_val = [m.group() for m in tags.finditer(rec["identify.string"])]
- if tags_val and tags_val[0].isdigit():
- structured = {"status": tags_val.pop(0)}
- else:
- structured = {}
- structured["tags"] = tags_val
- port.setdefault("scripts", []).append(
- {
- "id": "dismap-identify",
- "output": rec.get("identify.string", ""),
- "dismap-identify": structured,
- }
- )
- if rec.get("banner.byte"):
- raw_output = utils.decode_hex(rec["banner.byte"])
- if port["protocol"] == "tcp" and rec.get("protocol") == "http":
- probe = "GetRequest"
- else:
- probe = "NULL"
- nmap_info = utils.match_nmap_svc_fp(
- output=raw_output,
- proto=port["protocol"],
- probe=probe,
- )
- if nmap_info:
- try:
- del nmap_info["soft"]
- except KeyError:
- pass
- add_cpe_values(
- host,
- f"ports.port:{rec['port']}",
- nmap_info.pop("cpe", []),
+ if rec.get("protocol"):
+ port["service_name"] = rec["protocol"]
+ if port["service_name"] == "https":
+ port["service_name"] = "http"
+ port["service_tunnel"] = "ssl"
+ host = {
+ "addr": rec["host"],
+ "state": "up",
+ "schema_version": xmlnmap.SCHEMA_VERSION,
+ "starttime": timestamp,
+ "endtime": timestamp,
+ "ports": [port],
+ }
+ if rec.get("identify.bool"):
+ tags_val = [
+ m.group() for m in res_tags.finditer(rec["identify.string"])
+ ]
+ if tags_val and tags_val[0].isdigit():
+ structured = {"status": tags_val.pop(0)}
+ else:
+ structured = {}
+ structured["tags"] = tags_val
+ port.setdefault("scripts", []).append(
+ {
+ "id": "dismap-identify",
+ "output": rec.get("identify.string", ""),
+ "dismap-identify": structured,
+ }
)
- host["cpes"] = list(host["cpes"].values())
- for cpe in host["cpes"]:
- cpe["origins"] = sorted(cpe["origins"])
- if not host["cpes"]:
- del host["cpes"]
- port.update(nmap_info)
- xmlnmap.add_service_hostname(
- nmap_info,
- host.setdefault("hostnames", []),
+ if rec.get("banner.byte"):
+ raw_output = utils.decode_hex(rec["banner.byte"])
+ if port["protocol"] == "tcp" and rec.get("protocol") == "http":
+ probe = "GetRequest"
+ else:
+ probe = "NULL"
+ nmap_info = utils.match_nmap_svc_fp(
+ output=raw_output,
+ proto=port["protocol"],
+ probe=probe,
)
- if probe == "GetRequest":
- try:
- hdrs, body = http_split.split(raw_output, 1)
- except ValueError:
- hdrs = raw_output
- body = None
- # TODO http-headers / http-content
- hdrs_split = http_hdr_split.split(hdrs)
- if hdrs_split:
- hdr_output_list = [
- utils.nmap_encode_data(line) for line in hdrs_split
- ]
- # FIXME: method should be reported
- hdr_output_list.extend(["", "(Request type: GET)"])
- structured = [
- {
- "name": "_status",
- "value": utils.nmap_encode_data(hdrs_split[0].strip()),
- }
- ]
- structured.extend(
- {
- "name": utils.nmap_encode_data(hdrname).lower(),
- "value": utils.nmap_encode_data(hdrval),
- }
- for hdrname, hdrval in (
- m.groups()
- for m in (
- utils.RAW_HTTP_HEADER.search(part.strip())
- for part in hdrs_split
+ if nmap_info:
+ try:
+ del nmap_info["soft"]
+ except KeyError:
+ pass
+ add_cpe_values(
+ host,
+ f"ports.port:{rec['port']}",
+ nmap_info.pop("cpe", []),
+ )
+ host["cpes"] = list(host["cpes"].values())
+ for cpe in host["cpes"]:
+ cpe["origins"] = sorted(cpe["origins"])
+ if not host["cpes"]:
+ del host["cpes"]
+ port.update(nmap_info)
+ xmlnmap.add_service_hostname(
+ nmap_info,
+ host.setdefault("hostnames", []),
+ )
+ if probe == "GetRequest":
+ try:
+ hdrs, body = http_split.split(raw_output, 1)
+ except ValueError:
+ hdrs = raw_output
+ body = None
+ # TODO http-headers / http-content
+ hdrs_split = http_hdr_split.split(hdrs)
+ if hdrs_split:
+ hdr_output_list = [
+ utils.nmap_encode_data(line) for line in hdrs_split
+ ]
+ # FIXME: method should be reported
+ hdr_output_list.extend(["", "(Request type: GET)"])
+ structured = [
+ {
+ "name": "_status",
+ "value": utils.nmap_encode_data(
+ hdrs_split[0].strip()
+ ),
+ }
+ ]
+ structured.extend(
+ {
+ "name": utils.nmap_encode_data(hdrname).lower(),
+ "value": utils.nmap_encode_data(hdrval),
+ }
+ for hdrname, hdrval in (
+ m.groups()
+ for m in (
+ utils.RAW_HTTP_HEADER.search(part.strip())
+ for part in hdrs_split
+ )
+ if m
)
- if m
)
+ port.setdefault("scripts", []).append(
+ {
+ "id": "http-headers",
+ "output": "\n".join(hdr_output_list),
+ "http-headers": structured,
+ "masscan": {"raw": utils.encode_b64(hdrs).decode()},
+ }
+ )
+ # FIXME: path should be reported
+ handle_http_headers(host, port, structured, path="/")
+ if body:
+ port.setdefault("scripts", []).append(
+ {
+ "id": "http-content",
+ "output": utils.nmap_encode_data(body),
+ }
+ )
+ handle_http_content(host, port, body)
+ else:
+ banner = "".join(
+ chr(d)
+ if 32 <= d <= 126 or d in {9, 10, 13}
+ else "\\x%02x" % d
+ for d in raw_output
)
port.setdefault("scripts", []).append(
{
- "id": "http-headers",
- "output": "\n".join(hdr_output_list),
- "http-headers": structured,
- "masscan": {"raw": utils.encode_b64(hdrs).decode()},
- }
- )
- # FIXME: path should be reported
- handle_http_headers(host, port, structured, path="/")
- if body:
- port.setdefault("scripts", []).append(
- {
- "id": "http-content",
- "output": utils.nmap_encode_data(body),
+ "id": "banner",
+ "output": banner,
+ "masscan": {
+ "raw": utils.encode_b64(raw_output).decode(),
+ "encoded": banner,
+ },
}
)
- handle_http_content(host, port, body)
- else:
- banner = "".join(
- chr(d) if 32 <= d <= 126 or d in {9, 10, 13} else "\\x%02x" % d
- for d in raw_output
- )
- port.setdefault("scripts", []).append(
- {
- "id": "banner",
- "output": banner,
- "masscan": {
- "raw": utils.encode_b64(raw_output).decode(),
- "encoded": banner,
- },
- }
- )
- # remaining fields / TODO:
- # banner.string note path uri
- if categories:
- host["categories"] = categories
- if tags:
- add_tags(host, tags)
- if source is not None:
- host["source"] = source
- host = self.json2dbrec(host)
- self.store_host(host)
- if callback is not None:
- callback(host)
- self.stop_store_hosts()
- return True
+ # remaining fields / TODO:
+ # banner.string note path uri
+ if categories:
+ host["categories"] = categories
+ if tags:
+ add_tags(host, tags)
+ if source is not None:
+ host["source"] = source
+ host = self.json2dbrec(host)
+ self.store_host(host)
+ if callback is not None:
+ callback(host)
+ self.stop_store_hosts()
+ return True
class DBView(DBActive):
| Dismap support
Hi Pierre,
Is dismap still supported? We attempted to import the output of dismap in JSON format, but this error appears:
```
WARNING:ivre:Exception (file 'output.json')
Traceback (most recent call last):
File "/usr/local/lib/python3.9/dist-packages/ivre/db/init.py", line 2148, in store_scan
store_scan_function = {
KeyError: b'{'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/dist-packages/ivre/tools/[scan2db.py](http://scan2db.py/)", line 146, in main
if [database.store](http://database.store/)_scan(
File "/usr/local/lib/python3.9/dist-packages/ivre/db/init.py", line 2188, in store_scan
raise ValueError( # pylint: disable=raise-missing-from
ValueError: Unknown file type output.json
INFO:ivre:0 results imported.
```
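From the traceback, the importer seems to guess the file type from the first byte of the file; a rough sketch of a dispatch that would accept both dismap layouts, a single JSON array or one JSON object per line (names are illustrative, not IVRE's API):

```python
import json

def iter_dismap_records(fname):
    with open(fname, "rb") as fdesc:
        first = fdesc.read(1)
    with open(fname) as fdesc:
        if first == b"[":
            yield from json.load(fdesc)    # single JSON array
        elif first == b"{":
            for line in fdesc:
                yield json.loads(line)     # one JSON object per line
        else:
            raise ValueError(f"unexpected first byte {first!r}")
```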
We tried all available versions of dismap.
Best Regards
| Hi there!
Yes, Dismap is supposed to be supported. Could you share a result you are trying to insert?
Thanks!
Hi Pierre,
Sorry for the delay in my answer. Here is the file that we tried to import: [output.json](https://github.com/ivre/ivre/files/13885821/output.json)
We tried some other outputs but the result was the same.
Many Thanks! | 2024-01-11T23:37:16 | 0.0 | [] | [] |
||
justmedude/pylotoncycle | justmedude__pylotoncycle-20 | b6de2945ccac921f989b98d6169d5cdf17d52b08 | diff --git a/pylotoncycle/pylotoncycle.py b/pylotoncycle/pylotoncycle.py
index c86da6f..102d8b0 100755
--- a/pylotoncycle/pylotoncycle.py
+++ b/pylotoncycle/pylotoncycle.py
@@ -115,7 +115,7 @@ def GetRecentWorkouts(self, num_workouts=None):
for i in workout_list:
workout_id = i["id"]
- resp_summary = self.GetWorkoutSummaryById(workout_id)
+ performance_graph = self.GetWorkoutMetricsById(workout_id)
resp_workout = self.GetWorkoutById(workout_id)
if "instructor_id" in resp_workout["ride"]:
@@ -126,7 +126,7 @@ def GetRecentWorkouts(self, num_workouts=None):
"name": resp_workout["ride"]["instructor"]["name"]
}
- resp_workout["overall_summary"] = resp_summary
+ resp_workout["performance_graph"] = performance_graph
try:
resp_workout["instructor_name"] = resp_instructor["name"]
except KeyError:
@@ -140,10 +140,13 @@ def GetWorkoutSummaryById(self, workout_id):
return resp
def GetWorkoutMetricsById(self, workout_id, frequency=50):
- url = "%s/api/workout/%s/performance_graph?every_n=%s" % (
+ performance_frequency = (
+ "?every_n=%s" % (frequency) if frequency > 0 else ""
+ )
+ url = "%s/api/workout/%s/performance_graph%s" % (
self.base_url,
workout_id,
- frequency,
+ performance_frequency,
)
resp = self.GetUrl(url)
return resp
| total_heart_rate_zone_durations data isn't correct
I'm trying to figure out where this data is coming from or how it's getting generated.
The effort_zones data I pulled out of the inspector and what shows up in the JSON are very different.
For my recent 15-minute ride, here's the data I get.
From Peloton performance_graph.json. The numbers here correlate to seconds.
```
{
"effort_zones": {
"total_effort_points": 29.4,
"heart_rate_zone_durations": {
"heart_rate_z1_duration": 0,
"heart_rate_z2_duration": 94,
"heart_rate_z3_duration": 187,
"heart_rate_z4_duration": 617,
"heart_rate_z5_duration": 0
}
}
}
```
From pylotoncycle I get the following. I can't tell what the numbers correspond to, and even converted to percentages it's still not comparable to the Peloton data.
```
"total_heart_rate_zone_durations": {
"heart_rate_z1_duration": 299,
"heart_rate_z2_duration": 505,
"heart_rate_z3_duration": 1766,
"heart_rate_z4_duration": 1112,
"heart_rate_z5_duration": 0
},
```
I checked my rides and it's not a one-off; it's consistently very different data.
Is this intentional? If so, what units are being used for the heart rate zones? Is there a way to convert or just get the raw data?
Thanks!
| Ahh ok. I did a bunch more digging and it looks like it's the average heart rate data for everyone who's taken the class. Not sure if that's intentional and you want to leave it or if you want to swap to the data from performance_graph.
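For reference, a quick sketch of pulling one's own zones straight from the performance graph endpoint (placeholder credentials; assumes the `PylotonCycle` client and its `GetWorkoutMetricsById` helper):

```python
import pylotoncycle

conn = pylotoncycle.PylotonCycle("user@example.com", "password")  # placeholders
workout_id = conn.GetRecentWorkouts(1)[0]["id"]
metrics = conn.GetWorkoutMetricsById(workout_id)
# The rider's own zones, i.e. what the Peloton performance graph shows:
print(metrics["effort_zones"]["heart_rate_zone_durations"])
```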
I have an idea here. I'll throw together a PR and we'll see how you feel about it. | 2024-01-18T21:26:56 | 0.0 | [] | [] |
||
ivre/ivre | ivre__ivre-1266 | f969ad0cb09b3494f57b8f4031690d3f8074ec35 | diff --git a/ivre/db/__init__.py b/ivre/db/__init__.py
index c9aa2d657c..1be512f798 100644
--- a/ivre/db/__init__.py
+++ b/ivre/db/__init__.py
@@ -2731,6 +2731,8 @@ def store_scan_json_nuclei(
name = rec["name"]
if "matcher_name" in rec:
name += " (%s)" % rec["matcher_name"]
+ elif "matcher-name" in rec:
+ name += " (%s)" % rec["matcher-name"]
script_id = "%s-nuclei" % (rec["type"])
scripts = [
{
| Nuclei
Hi, while testing the new nuclei and httpx support, I detected a bug:
### IVRE Version
```
$ ivre version
IVRE - Network recon framework
Copyright 2011 - 2020 Pierre LALET <[email protected]>
Version 0.9.16.dev328
Python 3.8.10 (default, Nov 26 2021, 20:14:08)
[GCC 9.3.0]
Linux dsec 5.11.0-37-generic #41~20.04.2-Ubuntu SMP Fri Sep 24 09:06:38 UTC 2021 x86_64
Dependencies:
Python module pymongo: 3.11.0
Python module sqlalchemy: missing
Python module psycopg2: missing
Python module cryptography: 2.8
Python module krbV: missing
Python module pycurl: missing
Python module PIL: 8.4.0
Python module MySQLdb: missing
Python module dbus: 1.2.16
Python module matplotlib: 3.1.2
Python module bottle: 0.12.18
Python module OpenSSL: 19.0.0
Python module tinydb: missing
```
```
$ nuclei -version
__ _
____ __ _______/ /__ (_)
/ __ \/ / / / ___/ / _ \/ /
/ / / / /_/ / /__/ / __/ /
/_/ /_/\__,_/\___/_/\___/_/ 2.5.7
projectdiscovery.io
[WRN] Use with caution. You are responsible for your actions.
[WRN] Developers assume no liability and are not responsible for any misuse or damage.
[INF] Current Version: 2.5.7
```
### Summary
In the Nuclei import module, matcher-name is not included in the response
### Expected behavior
[info] FingerprintHub Technology Fingerprint found at http://domain.tld
vs
[info] FingerprintHub Technology Fingerprint (microsoft-exchange) found at http://domain.tld
### Actual behavior
It seems it's just a mistake in the JSON parser:
replace matcher_name with matcher-name on lines 2732 and 2733 of __init__.py
(Sorry, I tried to make my first merge request ... but I had a PGP error.)
```
git diff master
diff --git a/ivre/db/__init__.py b/ivre/db/__init__.py
index c9aa2d65..4f2755bb 100644
--- a/ivre/db/__init__.py
+++ b/ivre/db/__init__.py
@@ -2729,8 +2729,8 @@ class DBNmap(DBActive):
elif "template-id" in rec:
rec["template"] = rec.pop("template-id")
name = rec["name"]
- if "matcher_name" in rec:
- name += " (%s)" % rec["matcher_name"]
+ if "matcher-name" in rec:
+ name += " (%s)" % rec["matcher-name"]
script_id = "%s-nuclei" % (rec["type"])
scripts = [
{
```
Best regards
| Hi,
Actually, this is (again) a difference between format versions, both are valid...
See projectdiscovery/nuclei@2b9bd7e9c3953c63dc967461f8542b2d9bc3c640
I'll have a look, thanks a lot for reporting this! | 2021-12-20T23:20:21 | 0.0 | [] | [] |
||
adapter-hub/adapters | adapter-hub__adapters-682 | 2652d272c9f5258f8b55982daacf36ce08326e1c | diff --git a/docs/method_combinations.md b/docs/method_combinations.md
index 3205b41f98..4bd57ac10c 100644
--- a/docs/method_combinations.md
+++ b/docs/method_combinations.md
@@ -92,7 +92,7 @@ which is identical to the following `ConfigUnion`:
from adapters import ConfigUnion, LoRAConfig, PrefixTuningConfig, SeqBnConfig
config = ConfigUnion(
- LoRAConfig(r=8, use_gating=True),
+ LoRAConfig(r=8, alpha=2, use_gating=True),
PrefixTuningConfig(prefix_length=10, use_gating=True),
SeqBnConfig(reduction_factor=16, use_gating=True),
)
diff --git a/src/adapters/configuration/adapter_config.py b/src/adapters/configuration/adapter_config.py
index ef401d17a7..b7855258db 100644
--- a/src/adapters/configuration/adapter_config.py
+++ b/src/adapters/configuration/adapter_config.py
@@ -621,7 +621,7 @@ def __init__(
components = [
prefix_tuning or PrefixTuningConfig(prefix_length=10),
adapter or SeqBnConfig(reduction_factor=16),
- lora or LoRAConfig(r=8),
+ lora or LoRAConfig(r=8, alpha=2),
]
super().__init__(*[c.replace(use_gating=True) for c in components])
| The original UniPELT paper uses alpha = 2 for LoRA, while the UniPELT implementation in adapters has alpha = 8
The LoRA config inside of UniPELT has a default alpha = 8 according to the documentation,
however, in the original UniPELT paper (https://arxiv.org/abs/2110.07577), alpha is set to 2.
Why is that? Did I misunderstand something here?
<img width="2346" alt="image" src="https://github.com/adapter-hub/adapters/assets/54015474/aed48a20-5923-421e-9cd5-39f8a73e659b">
<img width="988" alt="image" src="https://github.com/adapter-hub/adapters/assets/54015474/7b7fad19-73e0-47d3-a78c-e9e2b66e3fee">
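For reference, the paper's setting can be reproduced explicitly by building the union config by hand; this mirrors the documented `ConfigUnion` example with the LoRA scaling overridden:
```python
from adapters import ConfigUnion, LoRAConfig, PrefixTuningConfig, SeqBnConfig

# UniPELT-style union with the paper's LoRA scaling (alpha = 2)
config = ConfigUnion(
    LoRAConfig(r=8, alpha=2, use_gating=True),
    PrefixTuningConfig(prefix_length=10, use_gating=True),
    SeqBnConfig(reduction_factor=16, use_gating=True),
)
```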
| 2024-04-16T22:03:34 | 0.0 | [] | [] |
|||
davidbrochart/nbterm | davidbrochart__nbterm-55 | 0bae34a4c4a9d07cadea3fa0fbabefba26c36d9a | diff --git a/nbterm/cell.py b/nbterm/cell.py
index 0a3862c..e8a7933 100644
--- a/nbterm/cell.py
+++ b/nbterm/cell.py
@@ -55,6 +55,8 @@ def get_output_text_and_height(outputs: List[Dict[str, Any]]):
continue
text_list.append(text)
text_ansi = ANSI("".join(text_list))
+ if text_ansi and not height:
+ height = 1
return text_ansi, height
| Previous outputs included when notebook run from the command line
When I use "nbterm --run foo.ipynb" to execute a notebook, any cell outputs that are currently in the file are included in "foo_run.ipynb". New outputs are just appended to old outputs, with execution counts that start over from 1. I would think nbterm would behave like Jupyter, where a new kernel erases old outputs.
| Thanks for reporting the issue @conery, I'll have a look soon.
I tried on a simple example with one cell:
```python
print("hello")
```
And running the notebook with `nbterm --run hello.ipynb` works as expected, the cell output shows `hello`.
What version of nbterm are you using? Could you share a notebook that shows the issue?
Hi David,
I have version 0.0.11, running on an iMac with macOS 11.6.
Here are two screenshots from Jupyter. I executed the three cells in order, then went back to the middle cell and executed it again, which is why it has an execution count of 4. I saved this notebook, executed it with nbterm, and opened the output file. The second screen shot shows the issue: the code cell outputs with execution counts from the saved version are there, and the new outputs made when nbterm ran the notebook have been appended.
FWIW, I'm writing my own version of a notebook grader. It's running nbterm in a Docker container to execute notebooks submitted by students. It will be very easy for me to clear all the outputs first (since I have to preprocess the notebooks anyway) so this isn't a high priority issue for me, but it seemed like something you would want to see.
And (this should have been part of my original post) thanks for making nbterm available!
John
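For anyone doing similar preprocessing, a minimal sketch of clearing saved outputs with nbformat before handing a notebook to nbterm (file names are hypothetical):
```python
import nbformat

nb = nbformat.read("submission.ipynb", as_version=4)
for cell in nb.cells:
    if cell.cell_type == "code":
        cell.outputs = []           # drop outputs saved in the file
        cell.execution_count = None
nbformat.write(nb, "submission_clean.ipynb")
```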
Hi John, good to know nbterm is useful!
It seems that you didn't include the screenshots.
Now that's really weird! I can see them there in the message in my 'sent' folder.
Let me try sending them in separate messages.
JC
I'm not sure you can attach pictures when replying to a comment from an email.
Ah, got it. Let me try sending the notebook sources...
JC
I meant that you cannot post documents by attaching them to your email, you need to use the GitHub UI.
D'oh! Sorry :-)
First snapshot: notebook as seen in Jupyter after executing cells in order, then going back and re-executing the second cell.

Second snapshot: after using nbterm --run to execute the notebook, opening the output in Jupyter.

| 2021-12-12T19:19:02 | 0.0 | [] | [] |
||
SHI-Labs/NATTEN | SHI-Labs__NATTEN-167 | 88c9f5004ff436e3a38656c30e5a9413b823350e | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 516a35b..466e3c3 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,16 @@
# Changelog
## [Main branch]
+* Enable KV parallelism by default
+ * No realistic use case will disable KV parallelism, because it virtually kills occupancy in any
+ small-batch/few-head case. Most packages should be using this by default, the same way PyTorch's
+ deterministic mode is disabled by default. Users will still get a warning if PyTorch or NATTEN's
+ deterministic mode is enabled. (#167)
+* Bug fixes
+ * Fix rare DDP issue (#167).
+ * Fix inconsistencies in docs. (#167)
+* QoL
+ * Switch from `torch.cuda.amp` to `torch.amp` since the former is deprecated (#168)
## [0.17.1] - 2024-05-19
* Fixed interface for python 3.8 and 3.9
diff --git a/csrc/src/pytorch/na1d.cpp b/csrc/src/pytorch/na1d.cpp
index 286624a..5ff50b3 100644
--- a/csrc/src/pytorch/na1d.cpp
+++ b/csrc/src/pytorch/na1d.cpp
@@ -26,6 +26,7 @@
#include <ATen/ATen.h>
#include <torch/extension.h>
+#include <c10/cuda/CUDAGuard.h>
#include <natten/natten.h>
#include <natten/pytorch/cpu/na1d.h>
@@ -50,6 +51,7 @@ void na1d_forward(
float attn_scale,
const std::tuple<int32_t>& query_tile_size,
const std::tuple<int32_t>& key_tile_size) {
+ at::cuda::OptionalCUDAGuard device_guard(query.device());
AssertDimsAre128BitAligned(query, value);
CHECK_CONTIGUOUS(query);
CHECK_CONTIGUOUS(key);
@@ -109,6 +111,7 @@ void na1d_backward(
const std::tuple<int32_t>& key_tile_size,
const std::tuple<int32_t>& num_splits_key,
bool compute_delta_with_torch) {
+ at::cuda::OptionalCUDAGuard device_guard(query.device());
AssertDimsAre128BitAligned(query, value);
// TODO: please please simplify these checks!!!
CHECK_CONTIGUOUS(query);
@@ -172,6 +175,7 @@ void na1d_qk_forward(
const std::tuple<int32_t>& kernel_size,
const std::tuple<int32_t>& dilation,
const std::tuple<bool>& is_causal) {
+ at::cuda::OptionalCUDAGuard device_guard(query.device());
TORCH_CHECK(
!any_true(is_causal) || !bias.has_value(),
"Neighborhood attention with causal masking does not support positional biases yet.");
@@ -215,6 +219,7 @@ void na1d_qk_backward(
const std::tuple<int32_t>& kernel_size,
const std::tuple<int32_t>& dilation,
const std::tuple<bool>& is_causal) {
+ at::cuda::OptionalCUDAGuard device_guard(query.device());
TORCH_CHECK(
!any_true(is_causal) || !d_bias.has_value(),
"Neighborhood attention with causal masking does not support positional biases yet.");
@@ -262,6 +267,7 @@ void na1d_av_forward(
const std::tuple<int32_t>& kernel_size,
const std::tuple<int32_t>& dilation,
const std::tuple<bool>& is_causal) {
+ at::cuda::OptionalCUDAGuard device_guard(attn.device());
CHECK_CONTIGUOUS(out);
CHECK_CONTIGUOUS(value);
CheckArgs(kernel_size, dilation);
@@ -297,6 +303,7 @@ void na1d_av_backward(
const std::tuple<int32_t>& kernel_size,
const std::tuple<int32_t>& dilation,
const std::tuple<bool>& is_causal) {
+ at::cuda::OptionalCUDAGuard device_guard(attn.device());
CHECK_CONTIGUOUS(d_out);
CHECK_CONTIGUOUS(d_value);
CHECK_CONTIGUOUS(value);
diff --git a/csrc/src/pytorch/na2d.cpp b/csrc/src/pytorch/na2d.cpp
index 85f30cb..6787297 100644
--- a/csrc/src/pytorch/na2d.cpp
+++ b/csrc/src/pytorch/na2d.cpp
@@ -26,6 +26,7 @@
#include <ATen/ATen.h>
#include <torch/extension.h>
+#include <c10/cuda/CUDAGuard.h>
#include <natten/natten.h>
#include <natten/pytorch/cpu/na2d.h>
@@ -50,6 +51,7 @@ void na2d_forward(
float attn_scale,
const std::tuple<int32_t, int32_t>& query_tile_size,
const std::tuple<int32_t, int32_t>& key_tile_size) {
+ at::cuda::OptionalCUDAGuard device_guard(query.device());
AssertDimsAre128BitAligned(query, value);
CHECK_CONTIGUOUS(query);
CHECK_CONTIGUOUS(key);
@@ -111,6 +113,7 @@ void na2d_backward(
const std::tuple<int32_t, int32_t>& key_tile_size,
const std::tuple<int32_t, int32_t>& num_splits_key,
bool compute_delta_with_torch) {
+ at::cuda::OptionalCUDAGuard device_guard(query.device());
AssertDimsAre128BitAligned(query, value);
// TODO: please please simplify these checks!!!
CHECK_CONTIGUOUS(query);
@@ -176,6 +179,7 @@ void na2d_qk_forward(
const std::tuple<int32_t, int32_t>& kernel_size,
const std::tuple<int32_t, int32_t>& dilation,
const std::tuple<bool, bool>& is_causal) {
+ at::cuda::OptionalCUDAGuard device_guard(query.device());
TORCH_CHECK(
!any_true(is_causal) || !bias.has_value(),
"Neighborhood attention with causal masking does not support positional biases yet.");
@@ -221,6 +225,7 @@ void na2d_qk_backward(
const std::tuple<int32_t, int32_t>& kernel_size,
const std::tuple<int32_t, int32_t>& dilation,
const std::tuple<bool, bool>& is_causal) {
+ at::cuda::OptionalCUDAGuard device_guard(query.device());
TORCH_CHECK(
!any_true(is_causal) || !d_bias.has_value(),
"Neighborhood attention with causal masking does not support positional biases yet.");
@@ -270,6 +275,7 @@ void na2d_av_forward(
const std::tuple<int32_t, int32_t>& kernel_size,
const std::tuple<int32_t, int32_t>& dilation,
const std::tuple<bool, bool>& is_causal) {
+ at::cuda::OptionalCUDAGuard device_guard(attn.device());
CHECK_CONTIGUOUS(out);
CHECK_CONTIGUOUS(value);
CheckArgs(kernel_size, dilation);
@@ -307,6 +313,7 @@ void na2d_av_backward(
const std::tuple<int32_t, int32_t>& kernel_size,
const std::tuple<int32_t, int32_t>& dilation,
const std::tuple<bool, bool>& is_causal) {
+ at::cuda::OptionalCUDAGuard device_guard(attn.device());
CHECK_CONTIGUOUS(d_out);
CHECK_CONTIGUOUS(d_value);
CHECK_CONTIGUOUS(value);
diff --git a/csrc/src/pytorch/na3d.cpp b/csrc/src/pytorch/na3d.cpp
index cf72d0d..b9e66e6 100644
--- a/csrc/src/pytorch/na3d.cpp
+++ b/csrc/src/pytorch/na3d.cpp
@@ -26,6 +26,7 @@
#include <ATen/ATen.h>
#include <torch/extension.h>
+#include <c10/cuda/CUDAGuard.h>
#include <natten/natten.h>
#include <natten/pytorch/cpu/na3d.h>
@@ -50,6 +51,7 @@ void na3d_forward(
float attn_scale,
const std::tuple<int32_t, int32_t, int32_t>& query_tile_size,
const std::tuple<int32_t, int32_t, int32_t>& key_tile_size) {
+ at::cuda::OptionalCUDAGuard device_guard(query.device());
AssertDimsAre128BitAligned(query, value);
CHECK_CONTIGUOUS(query);
CHECK_CONTIGUOUS(key);
@@ -113,6 +115,7 @@ void na3d_backward(
const std::tuple<int32_t, int32_t, int32_t>& key_tile_size,
const std::tuple<int32_t, int32_t, int32_t>& num_splits_key,
bool compute_delta_with_torch) {
+ at::cuda::OptionalCUDAGuard device_guard(query.device());
AssertDimsAre128BitAligned(query, value);
// TODO: please please simplify these checks!!!
CHECK_CONTIGUOUS(query);
@@ -180,6 +183,7 @@ void na3d_qk_forward(
const std::tuple<int32_t, int32_t, int32_t>& kernel_size,
const std::tuple<int32_t, int32_t, int32_t>& dilation,
const std::tuple<bool, bool, bool>& is_causal) {
+ at::cuda::OptionalCUDAGuard device_guard(query.device());
TORCH_CHECK(
!any_true(is_causal) || !bias.has_value(),
"Neighborhood attention with causal masking does not support positional biases yet.");
@@ -227,6 +231,7 @@ void na3d_qk_backward(
const std::tuple<int32_t, int32_t, int32_t>& kernel_size,
const std::tuple<int32_t, int32_t, int32_t>& dilation,
const std::tuple<bool, bool, bool>& is_causal) {
+ at::cuda::OptionalCUDAGuard device_guard(query.device());
TORCH_CHECK(
!any_true(is_causal) || !d_bias.has_value(),
"Neighborhood attention with causal masking does not support positional biases yet.");
@@ -278,6 +283,7 @@ void na3d_av_forward(
const std::tuple<int32_t, int32_t, int32_t>& kernel_size,
const std::tuple<int32_t, int32_t, int32_t>& dilation,
const std::tuple<bool, bool, bool>& is_causal) {
+ at::cuda::OptionalCUDAGuard device_guard(attn.device());
CHECK_CONTIGUOUS(out);
CHECK_CONTIGUOUS(value);
CheckArgs(kernel_size, dilation);
@@ -317,6 +323,7 @@ void na3d_av_backward(
const std::tuple<int32_t, int32_t, int32_t>& kernel_size,
const std::tuple<int32_t, int32_t, int32_t>& dilation,
const std::tuple<bool, bool, bool>& is_causal) {
+ at::cuda::OptionalCUDAGuard device_guard(attn.device());
CHECK_CONTIGUOUS(d_out);
CHECK_CONTIGUOUS(d_value);
CHECK_CONTIGUOUS(value);
diff --git a/docs/fna/fna-quickstart.md b/docs/fna/fna-quickstart.md
index 9bb6e88..acaa812 100644
--- a/docs/fna/fna-quickstart.md
+++ b/docs/fna/fna-quickstart.md
@@ -7,7 +7,11 @@ you really need is to enable FNA by importing and calling `natten.use_fused_na`:
```python
import natten
-natten.use_fused_na(True)
+# Use FNA with KV parallelism in backward pass (default)
+natten.use_fused_na()
+
+# Use FNA without KV parallelism in backward pass
+natten.use_fused_na(True, kv_parallel=False)
```
@@ -65,11 +69,8 @@ Here's a list of recommendations if you're just starting to use NATTEN or FNA:
`natten.is_fused_na_enabled()`; certain GPU architectures may not support fused NA,
and some applications may require unfused NA. Read more in [fused vs unfused NA](fused-vs-unfused.md).
-3. Consider using [KV parallelism](kv-parallelism.md) to potentially gain in performance if you can afford
- additional global memory usage. This may slightly affect reproducibility, as KV parallelism
- makes the computation of `dQ` non-deterministic, but this should rarely affect your training
- stability. Note that KV parallelism is not guaranteed to improve performance in all cases, but
- it is still a setting worth configuring if you're not bound by memory capacity.
+3. Read more about [KV parallelism](kv-parallelism.md) settings to potentially gain in performance
+ if you can afford additional global memory usage.
4. Consider using the [Autotuner](autotuner.md) during inference, and possibly during training.
diff --git a/docs/fna/kv-parallelism.md b/docs/fna/kv-parallelism.md
index a03db66..ae791f5 100644
--- a/docs/fna/kv-parallelism.md
+++ b/docs/fna/kv-parallelism.md
@@ -14,11 +14,11 @@ In this document, we outline how you can specify your preference for using KV pa
```python
import natten
-# Enable KV parallelism
-natten.use_kv_parallelism_in_fused_na(True)
-
# Disable KV parallelism
natten.use_kv_parallelism_in_fused_na(False)
+
+# Enable KV parallelism
+natten.use_kv_parallelism_in_fused_na(True)
```
### Memory usage preference
diff --git a/docs/frontend.md b/docs/frontend.md
index e877474..46b0740 100644
--- a/docs/frontend.md
+++ b/docs/frontend.md
@@ -55,12 +55,12 @@ of different settings, which you may want to adjust to your use case.
To force NATTEN torch modules (`NeighborhoodAttention1D`, `NeighborhoodAttention2D`, and `NeighborhoodAttention3D`) to use FNA:
```python
-from natten import enable_fused_na, disable_fused_na
+from natten import use_fused_na
-enable_fused_na()
+use_fused_na()
# Modules will start using fused neighborhood attention
-disable_fused_na()
+use_fused_na(False)
# Go back to BMM-style (default)
```
@@ -121,14 +121,14 @@ disable_autotuner()
For more information, refer to [autotuner guide](fna/autotuner.md).
#### Memory usage in FNA
-Training with Fused Neighborhood Attention can be accelerated at the expense of using more global memory by using
-KV parallelization. Depending on your use case (how big your memory footprint already is and what your memory cap is),
+Training with Fused Neighborhood Attention can be accelerated at the expense of using more global memory by changing
+KV parallelization settings. Depending on your use case (how big your memory footprint already is and what your memory cap is),
you can consider this option.
-KV parallelism is disabled by default, and makes the backward pass non-deterministic, which means that it can't be used with
+KV parallelism is enabled by default, but it makes the backward pass non-deterministic, which means that it can't be used with
[PyTorch's deterministic mode](https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html).
-To enable this feature:
+To disable/re-enable this feature:
```python
from natten import (
diff --git a/src/natten/__init__.py b/src/natten/__init__.py
index 241430a..9ce7f49 100644
--- a/src/natten/__init__.py
+++ b/src/natten/__init__.py
@@ -116,4 +116,4 @@
"disable_tiled_na",
]
-__version__ = "0.17.1"
+__version__ = "0.17.2.dev0"
diff --git a/src/natten/context.py b/src/natten/context.py
index cb6f021..fc27a35 100644
--- a/src/natten/context.py
+++ b/src/natten/context.py
@@ -122,10 +122,8 @@ def use_kv_parallelism_in_fused_na(mode: bool = True):
NattenContext.is_kv_parallelism_enabled = True
logger.warning(
"You're enabling KV parallelism in Fused Neighborhood Attention. "
- "This feature may improve backpropagation latency, but will use some "
- "additional memory, and is non-deterministic. It is not recommended "
- "for memory-limited experiments, or those likely to suffer from "
- "exploding gradients due to non-determinism. "
+ "This feature will improve backpropagation latency, but will use some "
+ "additional memory, and is non-deterministic. "
"For more information please refer to "
"https://github.com/SHI-Labs/NATTEN/blob/main/docs/fna/kv-parallelism.md"
)
@@ -135,7 +133,7 @@ def is_kv_parallelism_in_fused_na_enabled() -> bool:
return NattenContext.is_kv_parallelism_enabled
-def use_fused_na(mode: bool = True, kv_parallel: bool = False):
+def use_fused_na(mode: bool = True, kv_parallel: bool = True):
if not mode:
NattenContext.is_fused_na_enabled = False
use_kv_parallelism_in_fused_na(False)
| issue while training using DDP
First of all, thank you for your great contributions to this community.
Unfortunately, I had a problem while training with DDP on multiple GPUs :(
Training on a single GPU works well, but when I tried training using 2 GPUs, an error occurred in the second GPU's process. (I used NeighborhoodAttention2D and the error occurred in "attn = attn.softmax(dim=-1)".)
The error came with the "NATTEN failure: cutlass error: Error Internal at: 102" message.
From searching, another user reported this error number and it was answered as a contiguity problem, but I think this one is not related to contiguous().
My running environment is organized as follows:
GPU: NVIDIA A6000 x2
Pytorch version: 2.4.1
| Thank you for your interest.
Could you share the version of NATTEN you're using, and kernel size, dilation, and other parameters related to NA?
These are parameters and configs of my setting:
NATTEN version: 0.17.1
I used "NeighborhoodAttention2D" with (dim = 180, num_heads = 6, qkv_bias=False)
and all the other parameters are set as default
Is this FP16 or FP32?
Dim 180 is definitely misaligned for anything below FP32, so that might be the reason. But I'm very surprised an error isn't raised in the first place to prevent this. Looking into this.
I'm trying to train under FP32.
Also, the same errors occur when I change the dimension :( (like 192...)
Okay that's concerning.
Could you please share a minimum steps/script to reproduce?
Also, if you're training in eager mode, it is also possible that some other operation is the issue. I'm assuming you've already tried running it with `CUDA_LAUNCH_BLOCKING=1`?
@alihassanijr can this be related to the CUDAStream bug that we saw a while ago (which you actually fixed in 0.17)?
I don't think so, and if it was fixed in 0.17 it shouldn't occur here since they're using 0.17.1.
CUDA_LAUNCH_BLOCKING=1 was something I had already tried.
However, ironically, when I created a minimum demo using only NA, it works well.
I think that there's some incompatibility with certain packages while running the model with NA.
I'll report as soon as I find out.
I found out under which condition the error occurs!
This happens when the model is wrapped using distributed data parallel for training.
here is the demo:
[nattentest.py.zip](https://github.com/user-attachments/files/17091827/nattentest.py.zip)
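For readers without the attachment, an illustrative sketch of the kind of setup described above (hypothetical, not the attached nattentest.py; the kernel size and input shape are assumptions):
```python
# repro.py -- hypothetical sketch; launch with: torchrun --nproc_per_node=2 repro.py
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from natten import NeighborhoodAttention2D

def main():
    dist.init_process_group("nccl")
    rank = dist.get_rank()
    torch.cuda.set_device(rank)
    # Module configuration reported in the issue; kernel_size is assumed.
    na = NeighborhoodAttention2D(dim=180, num_heads=6, kernel_size=7, qkv_bias=False).cuda(rank)
    model = DDP(na, device_ids=[rank])
    x = torch.randn(2, 32, 32, 180, device=f"cuda:{rank}")  # (batch, H, W, dim) layout
    out = model(x)
    out.sum().backward()

if __name__ == "__main__":
    main()
```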
| 2024-09-23T02:26:13 | 0.0 | [] | [] |
||
ise-uiuc/nnsmith | ise-uiuc__nnsmith-127 | 018e75973b7b1a54da0d57107df66b1a79db5499 | diff --git a/.github/actions/setup/action.yaml b/.github/actions/setup/action.yaml
index cb912a5..1ab7471 100644
--- a/.github/actions/setup/action.yaml
+++ b/.github/actions/setup/action.yaml
@@ -29,7 +29,7 @@ runs:
uses: actions/setup-python@v4
id: py
with:
- python-version: '3.8'
+ python-version: '3.9'
# cache: 'pip'
# cache-dependency-path: ${{ steps.set-dep-path.outputs.dep-path }}
- run: echo "cache hit ${{ steps.py.outputs.cache-hit }}"
diff --git a/nnsmith/backends/onnxruntime.py b/nnsmith/backends/onnxruntime.py
index cdc0810..bd69941 100644
--- a/nnsmith/backends/onnxruntime.py
+++ b/nnsmith/backends/onnxruntime.py
@@ -1,6 +1,5 @@
from typing import List
-import onnx
import onnxruntime as ort
from multipledispatch import dispatch
@@ -60,7 +59,7 @@ def make_backend(
sess_options.intra_op_num_threads = NNSMITH_ORT_INTRA_OP_THREAD
sess = ort.InferenceSession(
- onnx._serialize(model.native_model),
+ model.native_model.SerializeToString(),
providers=self.providers,
sess_options=sess_options,
)
diff --git a/nnsmith/backends/tensorrt.py b/nnsmith/backends/tensorrt.py
index 9e14b63..c25568c 100644
--- a/nnsmith/backends/tensorrt.py
+++ b/nnsmith/backends/tensorrt.py
@@ -2,7 +2,6 @@
from typing import List
import numpy as np
-import onnx
import pycuda.driver as cuda
import tensorrt as trt
from multipledispatch import dispatch
@@ -94,7 +93,7 @@ def build_engine_onnx(onnx_model):
config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, 2 << 30)
parser = trt.OnnxParser(network, trt.Logger(trt.Logger.WARNING))
# Load the Onnx model and parse it in order to populate the TensorRT network.
- if not parser.parse(onnx._serialize(onnx_model)):
+ if not parser.parse(onnx_model.SerializeToString()):
error_msg = ""
for error in range(parser.num_errors):
error_msg += str(parser.get_error(error))
diff --git a/nnsmith/backends/tflite.py b/nnsmith/backends/tflite.py
index 07f4867..02f4da1 100644
--- a/nnsmith/backends/tflite.py
+++ b/nnsmith/backends/tflite.py
@@ -132,6 +132,7 @@ def _tflite_content_from_converter(
converter configuarations: https://www.tensorflow.org/api_docs/python/tf/lite/TFLiteConverter#attributes_1
"""
# Ref: https://www.tensorflow.org/api_docs/python/tf/lite/TargetSpec
+ converter.experimental_new_converter = True
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS, # enable TensorFlow Lite ops.
tf.lite.OpsSet.SELECT_TF_OPS, # enable TensorFlow ops.
diff --git a/nnsmith/materialize/tensorflow/dialect.py b/nnsmith/materialize/tensorflow/dialect.py
index 7b490d5..b6ccb34 100644
--- a/nnsmith/materialize/tensorflow/dialect.py
+++ b/nnsmith/materialize/tensorflow/dialect.py
@@ -674,6 +674,9 @@ def _init_axis(self, input_shape: List[Union[int, z3.ExprRef]]):
for i in range(len(input_shape)):
if random.random() < 0.5: # prob
axis.append(i)
+ # TODO(@ganler): tflite crashes when axis is empty
+ # remove this when tf fixes https://github.com/tensorflow/tensorflow/issues/62679
+ axis = axis or [0]
self.extra_attrs["axis"] = axis
ConstraintCheck.le(len(self.extra_attrs["axis"]), len(input_shape))
if self.extra_attrs["axis"]:
diff --git a/requirements/sys/tensorrt.txt b/requirements/sys/tensorrt.txt
index 4b850b0..616f29f 100644
--- a/requirements/sys/tensorrt.txt
+++ b/requirements/sys/tensorrt.txt
@@ -1,3 +1,1 @@
---extra-index-url https://pypi.ngc.nvidia.com
-nvidia-pyindex
-nvidia-tensorrt
+tensorrt
| Problems encountered while compiling the onnx model
I created an nnsmith environment using Conda and installed the required libraries according to the installation tutorial in cli.md: `python3 -m pip install "nnsmith[torch,onnx,tvm,onnxruntime]" --upgrade`. I can generate the onnx model normally, but I encountered an issue when trying to debug this model locally.
When I used the command "nnsmith.model_exec model.type=onnx backend.type=onnxruntime model.path=nnsmith_output/model.onnx", an AttributeError occurred:
module 'onnx' has no attribute '_serialize'

This seems to be an issue with the onnx version. I checked and found that the installed onnx version was 1.15.0, so I reinstalled onnx 1.14.0; when I ran the same command again, the onnx model compiled and ran smoothly.

| 
I entered the tvm environment, reinstalled the required configuration for nnsmith, and ran the command
nnsmith.model_exec model.type=onnx \
backend.type=onnxruntime \
model.path=nnsmith_output/model.onnx \
cmp.with='{type: tvm, optmax: true, target: CPU}'
to conduct differential testing, but encountered an error as shown in the above figure. Can someone help me identify the cause?
Thanks for reporting the issue. Looks like you are right, `onnx` updated their API in `1.15.0` to `load_from_string`. Please bear with downgrading the onnx version for now until I bring up a fix.
Well, this is a TVM problem and nnsmith cannot help here.
More specifically, the pre-built TVM binary is compiled using a GLibC version which is newer than what your OS has right now. You can either recompile TVM locally to use your local glibc or install a newer glibc in conda.
I understand. Thanks for your help. | 2023-12-22T02:16:15 | 0.0 | [] | [] |
||
neurodsp-tools/neurodsp | neurodsp-tools__neurodsp-277 | de30ce50ea46b299ee176ce0253646b596f41ca2 | diff --git a/neurodsp/sim/aperiodic.py b/neurodsp/sim/aperiodic.py
index 2e5fdaad..76bafcf3 100644
--- a/neurodsp/sim/aperiodic.py
+++ b/neurodsp/sim/aperiodic.py
@@ -11,14 +11,14 @@
from neurodsp.utils.checks import check_param_range
from neurodsp.utils.data import create_times, compute_nsamples
from neurodsp.utils.decorators import normalize
+from neurodsp.utils.norm import normalize_sig
from neurodsp.sim.utils import rotate_timeseries
from neurodsp.sim.transients import sim_synaptic_kernel
###################################################################################################
###################################################################################################
-@normalize
-def sim_poisson_pop(n_seconds, fs, n_neurons=1000, firing_rate=2):
+def sim_poisson_pop(n_seconds, fs, n_neurons=1000, firing_rate=2, lam=None):
"""Simulate a Poisson population.
Parameters
@@ -31,6 +31,8 @@ def sim_poisson_pop(n_seconds, fs, n_neurons=1000, firing_rate=2):
Number of neurons in the simulated population.
firing_rate : float, optional, default: 2
Firing rate of individual neurons in the population.
+ lam : float, optional, default: None
+ Mean and variance of the Poisson distribution. None defaults to n_neurons * firing_rate.
Returns
-------
@@ -56,8 +58,8 @@ def sim_poisson_pop(n_seconds, fs, n_neurons=1000, firing_rate=2):
>>> sig = sim_poisson_pop(n_seconds=1, fs=500, n_neurons=1000, firing_rate=2)
"""
- # Poisson population rate signal scales with # of neurons and individual rate
- lam = n_neurons * firing_rate
+ # Poisson population rate signal scales with the number of neurons and firing rate
+ lam = n_neurons * firing_rate if lam is None else lam
# Variance is equal to the mean
sig = np.random.normal(loc=lam, scale=lam**0.5, size=compute_nsamples(n_seconds, fs))
@@ -118,8 +120,7 @@ def sim_synaptic_current(n_seconds, fs, n_neurons=1000, firing_rate=2.,
t_ker = 5. * tau_d
# Simulate an extra bit because the convolution will trim & turn off normalization
- sig = sim_poisson_pop((n_seconds + t_ker), fs, n_neurons, firing_rate,
- mean=None, variance=None)
+ sig = sim_poisson_pop((n_seconds + t_ker), fs, n_neurons, firing_rate)
ker = sim_synaptic_kernel(t_ker, fs, tau_r, tau_d)
sig = np.convolve(sig, ker, 'valid')[:compute_nsamples(n_seconds, fs)]
@@ -185,8 +186,7 @@ def sim_knee(n_seconds, fs, exponent1, exponent2, knee):
return sig
-@normalize
-def sim_random_walk(n_seconds, fs, theta=1., mu=0., sigma=5.):
+def sim_random_walk(n_seconds, fs, theta=1., mu=0., sigma=5., norm=True):
"""Simulate a mean-reverting random walk, as an Ornstein-Uhlenbeck process.
Parameters
@@ -200,7 +200,9 @@ def sim_random_walk(n_seconds, fs, theta=1., mu=0., sigma=5.):
mu : float, optional, default: 0.0
Mean of the random walk.
sigma : float, optional, default: 5.0
- Standard deviation of the random walk.
+ Scaling of the Wiener process (dWt).
+ norm : bool, optional, default: True
+ Whether to normalize the signal to the mean (mu) and variance ((sigma**2 / (2 * theta))).
Returns
-------
@@ -216,10 +218,13 @@ def sim_random_walk(n_seconds, fs, theta=1., mu=0., sigma=5.):
Where:
- mu : mean
- - sigma : standard deviation
+ - sigma : Wiener scaling
- theta : memory scale
- dWt : increments of Wiener process, i.e. white noise
+ The Wiener scaling (sigma) differs from the standard deviation of the signal.
+ The standard deviation of the signal will instead equal: sigma / np.sqrt(2 * theta).
+
See the wikipedia page [1]_ for the integral solution.
References
@@ -244,6 +249,10 @@ def sim_random_walk(n_seconds, fs, theta=1., mu=0., sigma=5.):
sig = x0 * ex + mu * (1. - ex) + sigma * ex * \
np.cumsum(np.exp(theta * times) * np.sqrt(dt) * ws)
+ if norm:
+ variance = sigma ** 2 / (2 * theta)
+ sig = normalize_sig(sig, mean=mu, variance=variance)
+
return sig
| sim_poisson_pop bug: undesired normalization
sim\aperiodic.py includes a normalization decorator on line 18 which prevents the function sim_poisson_pop from returning a signal with the desired statistics.
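A quick way to see the problem (a sketch; the public import path is assumed):
```python
import numpy as np
from neurodsp.sim import sim_poisson_pop

# With n_neurons=1000 and firing_rate=2, lam = 2000, so the raw Poisson rate
# signal should have mean and variance of roughly 2000. Because of the
# @normalize decorator, the returned signal is rescaled instead.
sig = sim_poisson_pop(n_seconds=10, fs=1000, n_neurons=1000, firing_rate=2)
print(np.mean(sig), np.var(sig))
```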
| 2021-10-08T19:52:48 | 0.0 | [] | [] |
|||
ghammad/pyActigraphy | ghammad__pyActigraphy-124 | ab7913c7e00101668526e0a3f679eb6f79059301 | diff --git a/README.rst b/README.rst
index 18428350..b11c67ea 100644
--- a/README.rst
+++ b/README.rst
@@ -25,7 +25,7 @@ This package is meant to provide a comprehensive set of tools to:
* Actigraph: wGT3X-BT
* CamNtech: Actiwatch 4, 7, L(-Plus) and MotionWatch 8
- * Condor Instrument: ActTrust 2
+ * Condor Instrument: ActTrust 2 and ActLumus
* Daqtix: Daqtometer
* Respironics: Actiwatch 2 and Actiwatch Spectrum (plus)
* Tempatilumi (CE Brasil)
diff --git a/docs/source/api.rst b/docs/source/api.rst
index ffcd1278..24a75c04 100644
--- a/docs/source/api.rst
+++ b/docs/source/api.rst
@@ -18,7 +18,7 @@ Currently, the supported formats are:
* wGT3X-BT, Actigraph (.agd file format only);
* Actiwatch 4, 7, L(-Plus) and MotionWatch 8, CamNtech (.awd and .mtn);
-* ActTrust 2, Condor Instruments (.txt);
+* ActTrust 2, ActLumus, Condor Instruments (.txt);
* Daqtometer, Daqtix (.csv);
* Actiwatch 2 and Actiwatch Spectrum Plus, Philips Respironics (.csv)
* Tempatilumi (CE Brasil)
diff --git a/docs/source/pyActigraphy-Intro.ipynb b/docs/source/pyActigraphy-Intro.ipynb
index 235dee01..8a79d3af 100644
--- a/docs/source/pyActigraphy-Intro.ipynb
+++ b/docs/source/pyActigraphy-Intro.ipynb
@@ -77,6 +77,7 @@
"\n",
"* agd (Actigraph)\n",
"* atr (ActTrust)\n",
+ "* alu (ActLumus)\n",
"* awd (Actiwatch)\n",
"* dqt (DaqTix)\n",
"* mtn (MotionWatch8)\n",
@@ -91,6 +92,7 @@
"\n",
"* read_raw_agd\n",
"* read_raw_atr\n",
+ "* read_raw_alu\n",
"* read_raw_awd\n",
"* read_raw_dqt\n",
"* read_raw_mtn\n",
diff --git a/pyActigraphy/io/__init__.py b/pyActigraphy/io/__init__.py
index b57764cc..03fdaaa5 100644
--- a/pyActigraphy/io/__init__.py
+++ b/pyActigraphy/io/__init__.py
@@ -14,6 +14,7 @@
from .reader import read_raw
from .agd import read_raw_agd
from .atr import read_raw_atr
+from .alu import read_raw_alu
from .awd import read_raw_awd
from .bba import read_raw_bba
from .dqt import read_raw_dqt
@@ -27,6 +28,7 @@
"read_raw",
"read_raw_agd",
"read_raw_atr",
+ "read_raw_alu",
"read_raw_awd",
"read_raw_bba",
"read_raw_dqt",
diff --git a/pyActigraphy/io/alu/__init__.py b/pyActigraphy/io/alu/__init__.py
new file mode 100644
index 00000000..6234a34f
--- /dev/null
+++ b/pyActigraphy/io/alu/__init__.py
@@ -0,0 +1,11 @@
+"""Module to read ActLumus files."""
+
+# Authors: Grégory Hammad <[email protected]>, Carlos Baumont <[email protected]>
+#
+# License: BSD (3-clause)
+
+from .alu import RawALU
+
+from .alu import read_raw_alu
+
+__all__ = ["RawALU", "read_raw_alu"]
diff --git a/pyActigraphy/io/alu/alu.py b/pyActigraphy/io/alu/alu.py
new file mode 100644
index 00000000..11524d5e
--- /dev/null
+++ b/pyActigraphy/io/alu/alu.py
@@ -0,0 +1,347 @@
+import pandas as pd
+import os
+import re
+
+from ..base import BaseRaw
+from pyActigraphy.light import LightRecording
+
+
+class RawALU(BaseRaw):
+ r"""Raw object from .txt file recorded by ActLumus (Condor Instruments)
+
+ Parameters
+ ----------
+ input_fname: str
+ Path to the ActLumus file.
+ mode: str, optional
+ Activity sampling mode.
+ Available modes are: Proportional Integral Mode (PIM), Time Above
+ Threshold (TAT) and Zero Crossing Mode (ZCM).
+ Default is PIM.
+ start_time: datetime-like, optional
+ Read data from this time.
+ Default is None.
+ period: str, optional
+ Length of the read data.
+ Cf. #timeseries-offset-aliases in
+ <https://pandas.pydata.org/pandas-docs/stable/timeseries.html>.
+ Default is None (i.e all the data).
+ """
+
+ __default_modes = ["PIM", "PIMn", "TAT", "TATn", "ZCM", "ZCMn"]
+
+ def __init__(
+ self,
+ input_fname,
+ mode='PIM',
+ start_time=None,
+ period=None
+ ):
+
+ # get absolute file path
+ input_fname = os.path.abspath(input_fname)
+
+ # extract header and data size
+ header = {}
+ with open(input_fname) as fp:
+ first_line = fp.readline()
+ if not re.match(r"\+-*\+ \w+ \w+ \w+ \+-*\+", first_line):
+ raise ValueError(
+ "The input file ({}) does not ".format(input_fname)
+ + "seem to contain the usual header.\n Aborting."
+ )
+ for line in fp:
+ if '+-------------------' in line:
+ break
+ else:
+ chunks = line.strip().split(' : ')
+ if chunks:
+ header[chunks[0]] = chunks[1:]
+ if not header:
+ raise ValueError(
+ "The input file ({}) does not ".format(input_fname)
+ + "contain a header.\n Aborting."
+ )
+
+ # extract informations from the header
+ uuid = header['DEVICE_ID'][0]
+ name = header['SUBJECT_NAME'][0]
+ if (header['INTERVAL'][0] == '-'):
+ value = 60
+ else:
+ value = int(header['INTERVAL'][0])
+ freq = pd.Timedelta(value, unit='s')
+ self.__tat_thr = self.__extract_from_header(header, 'TAT_THRESHOLD')
+
+ index_data = pd.read_csv(
+ input_fname,
+ skiprows=len(header)+3,
+ sep=';',
+ parse_dates=True,
+ infer_datetime_format=True,
+ dayfirst=True,
+ index_col=[0]
+ ).resample(freq).sum()
+
+ self.__available_modes = sorted(list(
+ set(index_data.columns.values).intersection(
+ set(self.__default_modes))))
+
+ # Check requested sampling mode is available:
+ if mode not in self.__available_modes:
+ raise ValueError(
+ "The requested mode ({}) is not available".format(mode)
+ + " for this recording.\n"
+ + "Available modes are {}.".format(
+ self.__available_modes
+ )
+ )
+
+ if start_time is not None:
+ start_time = pd.to_datetime(start_time)
+ else:
+ start_time = index_data.index[0]
+
+ if period is not None:
+ period = pd.Timedelta(period)
+ stop_time = start_time+period
+ else:
+ stop_time = index_data.index[-1]
+ period = stop_time - start_time
+
+ index_data = index_data[start_time:stop_time]
+
+ # ACTIVITY
+ self.__activity = index_data[self.__available_modes]
+
+ # TEMP
+ self.__temperature = self.__extract_from_data(
+ index_data, 'TEMPERATURE'
+ )
+ self.__temperature_ext = self.__extract_from_data(
+ index_data, 'EXT TEMPERATURE'
+ )
+
+ # LIGHT
+ index_light = index_data.filter(
+ regex=("LIGHT|F[1-8]|CLEAR|MELANOPIC_LUX"))
+
+ # call __init__ function of the base class
+ super().__init__(
+ fpath=input_fname,
+ name=name,
+ uuid=uuid,
+ format='ALU',
+ axial_mode='tri-axial',
+ start_time=start_time,
+ period=period,
+ frequency=freq,
+ data=index_data[mode],
+ light=LightRecording(
+ name=name,
+ uuid=uuid,
+ data=index_light,
+ frequency=index_light.index.freq
+ ) if index_light is not None else None
+ )
+
+ @property
+ def available_modes(self):
+ r"""Available acquistion modes (PIM, ZCM, etc)"""
+ return self.__available_modes
+
+ @property
+ def PIM(self):
+ r"""Activity (in PIM mode)."""
+ return self.__extract_from_data(self.__activity, 'PIM')
+
+ @property
+ def PIMn(self):
+ r"""Activity (in normalized PIM mode)."""
+ return self.__extract_from_data(self.__activity, 'PIMn')
+
+ @property
+ def TAT(self):
+ r"""Activity (in TAT mode)."""
+ return self.__extract_from_data(self.__activity, 'TAT')
+
+ @property
+ def TATn(self):
+ r"""Activity (in normalized PIM mode)."""
+ return self.__extract_from_data(self.__activity, 'TATn')
+
+ @property
+ def ZCM(self):
+ r"""Activity (in ZCM mode)."""
+ return self.__extract_from_data(self.__activity, 'ZCM')
+
+ @property
+ def ZCMn(self):
+ r"""Activity (in normalized ZCM mode)."""
+ return self.__extract_from_data(self.__activity, 'ZCMn')
+
+ @property
+ def temperature(self):
+ r"""Value of the temperature (in ° C)."""
+ return self.__temperature
+
+ @property
+ def temperature_ext(self):
+ r"""Value of the external temperature (in ° C)."""
+ return self.__temperature_ext
+
+ @property
+ def amb_light(self):
+ r"""Value of the light intensity in µw/cm²."""
+ return self.__extract_light_channel("AMB LIGHT")
+
+ @property
+ def white_light(self):
+ r"""Value of the light intensity in µw/cm²."""
+ return self.__extract_light_channel("LIGHT")
+
+ @property
+ def red_light(self):
+ r"""Value of the light intensity in µw/cm²."""
+ return self.__extract_light_channel("RED LIGHT")
+
+ @property
+ def green_light(self):
+ r"""Value of the light intensity in µw/cm²."""
+ return self.__extract_light_channel("GREEN LIGHT")
+
+ @property
+ def blue_light(self):
+ r"""Value of the light intensity in µw/cm²."""
+ return self.__extract_light_channel("BLUE LIGHT")
+
+ @property
+ def ir_light(self):
+ r"""Value of the light intensity in µw/cm²."""
+ return self.__extract_light_channel("IR LIGHT")
+
+ @property
+ def uva_light(self):
+ r"""Value of the light intensity in µw/cm²."""
+ return self.__extract_light_channel("UVA LIGHT")
+
+ @property
+ def uvb_light(self):
+ r"""Value of the light intensity in µw/cm²."""
+ return self.__extract_light_channel("UVB LIGHT")
+
+ @property
+ def f1_light(self):
+ r"""Value of the light intensity in µw/cm²."""
+ return self.__extract_light_channel("F1")
+
+ @property
+ def f2_light(self):
+ r"""Value of the light intensity in µw/cm²."""
+ return self.__extract_light_channel("F2")
+
+ @property
+ def f3_light(self):
+ r"""Value of the light intensity in µw/cm²."""
+ return self.__extract_light_channel("F3")
+
+ @property
+ def f4_light(self):
+ r"""Value of the light intensity in µw/cm²."""
+ return self.__extract_light_channel("F4")
+
+ @property
+ def f5_light(self):
+ r"""Value of the light intensity in µw/cm²."""
+ return self.__extract_light_channel("F5")
+
+ @property
+ def f6_light(self):
+ r"""Value of the light intensity in µw/cm²."""
+ return self.__extract_light_channel("F6")
+
+ @property
+ def f7_light(self):
+ r"""Value of the light intensity in µw/cm²."""
+ return self.__extract_light_channel("F7")
+
+ @property
+ def f8_light(self):
+ r"""Value of the light intensity in µw/cm²."""
+ return self.__extract_light_channel("F8")
+
+ @property
+ def clear_light(self):
+ r"""Value of the light intensity in µw/cm²."""
+ return self.__extract_light_channel("CLEAR")
+
+ @property
+ def melanopic_lux_light(self):
+ r"""Value of the light intensity in µw/cm²."""
+ return self.__extract_light_channel("MELANOPIC_LUX")
+
+ @property
+ def tat_threshold(self):
+ r"""Threshold used in the TAT mode."""
+ return self.__tat_thr
+
+ @classmethod
+ def __extract_from_header(cls, header, key):
+ if header.get(key, None) is not None:
+ if header.get(key, None) != '-':
+ return header[key][0]
+ else:
+ return 0
+
+ @classmethod
+ def __extract_from_data(cls, data, key):
+ if key in data.columns:
+ return data[key]
+ else:
+ return None
+
+ def __extract_light_channel(self, channel):
+ if self.light is None:
+ return None
+ else:
+ return self.light.get_channel(channel)
+
+
+def read_raw_alu(
+ input_fname,
+ mode='PIM',
+ start_time=None,
+ period=None
+):
+ r"""Reader function for .txt file recorded by ActLumus (Condor Instruments)
+
+ Parameters
+ ----------
+ input_fname: str
+ Path to the ActLumus file.
+ mode: str, optional
+ Activity sampling mode.
+ Available modes are: Proportional Integral Mode (PIM), Time Above
+ Threshold (TAT) and Zero Crossing Mode (ZCM).
+ Default is PIM.
+ start_time: datetime-like, optional
+ Read data from this time.
+ Default is None.
+ period: str, optional
+ Length of the read data.
+ Cf. #timeseries-offset-aliases in
+ <https://pandas.pydata.org/pandas-docs/stable/timeseries.html>.
+ Default is None (i.e all the data).
+
+ Returns
+ -------
+ raw : Instance of RawALU
+ An object containing raw ALU data
+ """
+
+ return RawALU(
+ input_fname=input_fname,
+ mode=mode,
+ start_time=start_time,
+ period=period
+ )
diff --git a/pyActigraphy/io/reader/reader.py b/pyActigraphy/io/reader/reader.py
index 2246a455..82ac473a 100644
--- a/pyActigraphy/io/reader/reader.py
+++ b/pyActigraphy/io/reader/reader.py
@@ -8,6 +8,7 @@
from joblib import Parallel, delayed
from ..agd import read_raw_agd
from ..atr import read_raw_atr
+from ..alu import read_raw_alu
from ..awd import read_raw_awd
from ..dqt import read_raw_dqt
from ..mtn import read_raw_mtn
@@ -231,6 +232,7 @@ def read_raw(
* AGD ((w)GT3X(+)), ActiGraph)
* ATR (ActTrust, Condor Instruments)
+ * ALU (ActLumus, Condor Instruments)
* AWD (ActiWatch 4, CamNtech)
* DQT (Daqtometers, Daqtix)
* MTN (MotionWatch8, CamNtech)
@@ -257,7 +259,7 @@ def read_raw(
An object containing raw data
"""
- supported_types = ['AGD', 'ATR', 'AWD', 'DQT', 'MTN', 'RPX', 'TAL']
+ supported_types = ['AGD', 'ATR', 'ALU', 'AWD', 'DQT', 'MTN', 'RPX', 'TAL']
if reader_type not in supported_types:
raise ValueError(
'Type {0} unsupported. Supported types: {1}'.format(
@@ -281,6 +283,9 @@ def parallel_reader(
'ATR': lambda files: parallel_reader(
n_jobs, read_raw_atr, files, prefer, verbose, **kwargs
),
+ 'ALU': lambda files: parallel_reader(
+ n_jobs, read_raw_alu, files, prefer, verbose, **kwargs
+ ),
'AWD': lambda files: parallel_reader(
n_jobs, read_raw_awd, files, prefer, verbose, **kwargs
),
diff --git a/pyActigraphy/sleep/scoring/csm.py b/pyActigraphy/sleep/scoring/csm.py
index 9c428b49..01ce24c3 100644
--- a/pyActigraphy/sleep/scoring/csm.py
+++ b/pyActigraphy/sleep/scoring/csm.py
@@ -123,7 +123,7 @@ def csm(
"""Condor Sleep Model
Sleep-wake scoring algorithm developed by Condor Instrument for their
- ActTrust devices.
+ ActTrust and ActLumus devices.
This algorithm works in a two-step fashion. First, it classifies all epochs
as wake or rest, as function of each epoch's score. Second, using a more
diff --git a/pyActigraphy/sleep/scoring_base.py b/pyActigraphy/sleep/scoring_base.py
index b29063c4..424eeb80 100644
--- a/pyActigraphy/sleep/scoring_base.py
+++ b/pyActigraphy/sleep/scoring_base.py
@@ -867,7 +867,7 @@ def CSM(
"""Condor Sleep Model
Sleep-wake scoring algorithm developed by Condor Instrument for their
- ActTrust devices.
+ ActTrust and ActLumus devices.
This algorithm works in a two-step fashion. First, it classifies all
epochs as wake or rest, as function of each epoch's score. Second,
@@ -903,11 +903,11 @@ def CSM(
csm : pandas.Series
Series of state indices.
"""
- # This algorithm has been developed for ActTrust devices from
+ # This algorithm has been developed for ActTrust and ActLumus devices from
# Condor Instrument. Verify if the reader has the appropriate type:
- if self.format != 'ATR':
+ if self.format != 'ATR' and self.format != 'ALU':
raise ValueError(
- "The CSM has been developed for ActTrust devices.\n"
+ "The CSM has been developed for ActTrust and ActLumus devices.\n"
"It has not been validated for other devices."
)
| Develop
PR for release v1.2
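For context, a minimal sketch of using the new ActLumus reader added by this PR (the file path is hypothetical):
```python
import pyActigraphy

# Read an ActLumus .txt export with the new ALU reader (PIM mode by default).
raw = pyActigraphy.io.read_raw_alu('/path/to/actlumus_export.txt', mode='PIM')
print(raw.name, raw.frequency)
print(raw.PIM.head())
```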
| 2023-04-21T18:19:14 | 0.0 | [] | [] |
|||
AstraZeneca/roo | AstraZeneca__roo-87 | 8494be385ef2120c6e7f871376bf7dce92005a19 | diff --git a/src/roo/sources/remote_source.py b/src/roo/sources/remote_source.py
index c79c584..c389632 100644
--- a/src/roo/sources/remote_source.py
+++ b/src/roo/sources/remote_source.py
@@ -249,7 +249,6 @@ def _is_package_entry(entry: Any) -> bool:
href = entry["href"]
return cast(bool, (
href.endswith("gz")
- and href == entry.string
and href != "PACKAGES.gz"
))
@@ -257,7 +256,7 @@ def _is_package_entry(entry: Any) -> bool:
def _is_dir_entry(entry: Any) -> bool:
"""Returns true if the html entry refers to a directory."""
href = entry["href"]
- return cast(bool, (href.endswith("/") and href == entry.string))
+ return cast(bool, (href.endswith("/")))
def _get_pkgfiles_and_dirs_at_url(session: Any, url: str) -> tuple:
| tidyverse makes locking fail because of data.table dependency
My best guess is that it's because data.table is not a package found on CRAN
```
PS C:\Users\<user>\project> roo lock
Lockfile not found.
- tidyverse (2.0.0)
- broom (1.0.4)
- backports (1.4.1)
- dplyr (1.1.2)
- cli (3.6.1)
- generics (0.1.3)
- glue (1.6.2)
- lifecycle (1.0.3)
- cli (...)
- glue (...)
- rlang (1.1.1)
- magrittr (2.0.3)
- pillar (1.9.0)
- cli (...)
- fansi (1.0.4)
- glue (...)
- lifecycle (...)
- rlang (...)
- utf8 (1.2.3)
- vctrs (0.6.2)
- cli (...)
- glue (...)
- lifecycle (...)
- rlang (...)
- R6 (2.5.1)
- rlang (...)
- tibble (3.2.1)
- fansi (...)
- lifecycle (...)
- magrittr (...)
- pillar (...)
- pkgconfig (2.0.3)
- rlang (...)
- vctrs (...)
- tidyselect (1.2.0)
- cli (...)
- glue (...)
- lifecycle (...)
- rlang (...)
- vctrs (...)
- withr (2.5.0)
- vctrs (...)
- ellipsis (0.3.2)
- rlang (...)
- generics (...)
- glue (...)
- lifecycle (...)
- purrr (1.0.1)
- cli (...)
- lifecycle (...)
- magrittr (...)
- rlang (...)
- vctrs (...)
- rlang (...)
- stringr (1.5.0)
- cli (...)
- glue (...)
- lifecycle (...)
- magrittr (...)
- rlang (...)
- stringi (1.7.12)
- vctrs (...)
- tibble (...)
- tidyr (1.3.0)
- cli (...)
- dplyr (...)
- glue (...)
- lifecycle (...)
- magrittr (...)
- purrr (...)
- rlang (...)
- stringr (...)
- tibble (...)
- tidyselect (...)
- vctrs (...)
- cpp11 (0.4.3)
- conflicted (1.2.0)
- cli (...)
- memoise (2.0.1)
- rlang (...)
- cachem (1.0.8)
- rlang (...)
- fastmap (1.1.1)
- rlang (...)
- cli (...)
- dbplyr (2.3.2)
- blob (1.2.4)
- rlang (...)
- vctrs (...)
- cli (...)
- DBI (1.1.3)
- dplyr (...)
- glue (...)
- lifecycle (...)
- magrittr (...)
- pillar (...)
- purrr (...)
- R6 (...)
- rlang (...)
- tibble (...)
- tidyr (...)
- tidyselect (...)
- vctrs (...)
- withr (...)
- dplyr (...)
- dtplyr (1.3.1)
- cli (...)
Error: Unable to create lock file: Unable to find package for dependency data.table
PS C:\Users\<user>\project> cat .\rproject.toml
[tool.roo]
name = "mans"
version = "1.0.0"
[[tool.roo.source]]
name = "CRAN"
url = "https://cran.radicaldevelop.com/"
[tool.roo.dependencies]
tidyverse = "*"
```
| That's interesting. It should be there. I'll check and come back to you.
Ok, I traced it down. I'll fix it today and release 0.17.1. I'll keep you posted.
| 2023-06-20T12:50:05 | 0.0 | [] | [] |
||
mine-your-business/myb-nicehash-api | mine-your-business__myb-nicehash-api-4 | 795dbad795566b2a1b44ac8137f2fe09eda42898 | diff --git a/nicehash/client.py b/nicehash/client.py
index 8e287fd..1dbab76 100644
--- a/nicehash/client.py
+++ b/nicehash/client.py
@@ -79,10 +79,10 @@ def get_exchange_orderbook(self, market, limit):
class NiceHashPrivateApi:
- def __init__(self, host, organisation_id, key, secret, verbose=False):
+ def __init__(self, host, organization_id, key, secret, verbose=False):
self.key = key
self.secret = secret
- self.organisation_id = organisation_id
+ self.organization_id = organization_id
self.host = host
self.verbose = verbose
@@ -98,7 +98,7 @@ def request(self, method, path, query, body):
message += bytearray(xnonce, 'utf-8')
message += bytearray('\x00', 'utf-8')
message += bytearray('\x00', 'utf-8')
- message += bytearray(self.organisation_id, 'utf-8')
+ message += bytearray(self.organization_id, 'utf-8')
message += bytearray('\x00', 'utf-8')
message += bytearray('\x00', 'utf-8')
message += bytearray(method, 'utf-8')
@@ -120,7 +120,7 @@ def request(self, method, path, query, body):
'X-Nonce': xnonce,
'X-Auth': xauth,
'Content-Type': 'application/json',
- 'X-Organization-Id': self.organisation_id,
+ 'X-Organization-Id': self.organization_id,
'X-Request-Id': str(uuid.uuid4())
}
| Spelling: organization
https://github.com/mine-your-business/myb-nicehash-api/blob/795dbad795566b2a1b44ac8137f2fe09eda42898/nicehash/client.py#L82
organisation should be spelled organization.
@luciussilanus technically, both are correct English, with `Organisation` being the British English version of the word but still valid in American English.
I believe the spelling in this instance was copied from an example provided by NiceHash which is currently based out of the British Virgin Islands, a British overseas territory.
That said, my personal preference is to use the `z` in the spelling so if you would like to fork the repo and submit a pull request to change the spelling, I would accept that change.
The more you know! | 2021-05-14T18:39:18 | 0.0 | [] | [] |
||
anufrievroman/waypaper | anufrievroman__waypaper-52 | 4120bf97d4455dbc06445013dd41e99c7051ec89 | diff --git a/waypaper/changer.py b/waypaper/changer.py
index 2678856..0e706a2 100644
--- a/waypaper/changer.py
+++ b/waypaper/changer.py
@@ -4,6 +4,7 @@
import time
from waypaper.config import Config
from waypaper.translations import Chinese, English, French, German, Polish, Russian
+import re
def change_wallpaper(image_path: str, cf: Config, monitor: str, txt: Chinese|English|French|German|Polish|Russian):
@@ -114,12 +115,29 @@ def change_wallpaper(image_path: str, cf: Config, monitor: str, txt: Chinese|Eng
time.sleep(1)
preload_command = ["hyprctl", "hyprpaper", "preload", image_path]
if monitor == "All":
- monitor = ""
- wallpaper_command = ["hyprctl", "hyprpaper", "wallpaper", f"{monitor},{image_path}"]
- unload_command = ["hyprctl", "hyprpaper", "unload", "all"]
- subprocess.Popen(unload_command)
- subprocess.Popen(preload_command)
- subprocess.Popen(wallpaper_command)
+ monitors: list = []
+ # Check available motitors (using hyprpaper):
+ query_output = str(subprocess.check_output(["hyprctl", "monitors"], encoding='utf-8'))
+ query_output = query_output.split('\n')
+ # Use a regular expression to get the lines that contain the monitor names:
+ query_output = list(filter(lambda line: re.match(r"Monitor [a-zA-Z-0-9]+ \(ID \d+\):", line),query_output))
+ for line in query_output:
+ monitors.append(line.split(' ')[1])
+ else:
+ monitors: list = [monitor]
+ for m in monitors:
+ wallpaper_command = ["hyprctl", "hyprpaper", "wallpaper", f"{m},{image_path}"]
+ unload_command = ["hyprctl", "hyprpaper", "unload", "all"]
+ result: str = ""
+ retry_counter: int = 0
+ while result != "ok" and retry_counter < 10:
+ try:
+ subprocess.check_output(unload_command, encoding="utf-8").strip()
+ subprocess.check_output(preload_command, encoding="utf-8").strip()
+ result = subprocess.check_output(wallpaper_command, encoding="utf-8").strip()
+ time.sleep(0.1)
+ except Exception:
+ retry_counter += 1
elif cf.backend == "none":
pass
| Support for Hyprpaper
Any ETA on support for [Hyprpaper](https://github.com/hyprwm/hyprpaper)?
| Hi, while Hyprland user myself, I have several problems with hyprpaper. Basically, this tool is very different from all other wallpaper tools, and frankly is pretty weird! For example, "A Wallpaper cannot be applied without preloading. The config is not reloaded dynamically". So, there are a few problems it creates:
1) hyprpaper does not even start without user first making proper config.
2) It can only load wallpapers 'preloaded' from that config, i.e. we cannot just pass any image path.
3) There is no interface like `hyprland imagepath.jpg` to set a wallpaper, so waypaper can't really use it like all other wallpaper tools. Instead, I'd need to edit user's config files to make it work, or something like that.
So, as far as I understand, hyprpaper is a truly minimalist tool for loading specific wallpapers to specific workspaces at the start of Hyprland and is not really meant to change wallpapers on the flight. If I misunderstand something here, please correct me :)
> Hi, while Hyprland user myself, I have several problems with hyprpaper. Basically, this tool is very different from all other wallpaper tools, and frankly is pretty weird! For example, "A Wallpaper cannot be applied without preloading. The config is not reloaded dynamically". So, there are a few problems it creates:
>
> 1. hyprpaper does not even start without user first making proper config.
> 2. It can only load wallpapers 'preloaded' from that config, i.e. we cannot just pass any image path.
> 3. There is no interface like `hyprland imagepath.jpg` to set a wallpaper, so waypaper can't really use it like all other wallpaper tools. Instead, I'd need to edit user's config files to make it work, or something like that.
>
> So, as far as I understand, hyprpaper is a truly minimalist tool for loading specific wallpapers to specific workspaces at the start of Hyprland and is not really meant to change wallpapers on the flight. If I misunderstand something here, please correct me :)
@anufrievroman You can use `hyprctl` or another Hyprland IPC interface to load/unload and set a wallpaper (speaking as a Hyprland and hyprpaper user).
For example with hyprctl, use this to change to a new wallpaper and unload the previous one:
```bash
hyprctl hyprpaper preload /path/to/wallpaper
hyprctl hyprpaper wallpaper MONITOR,/path/to/wallpaper
hyprctl hyprpaper unload /path/to/wallpaper
```
> To set a wallpaper for all monitors remove the monitor part in the command, to list all monitor names use `hyprctl monitors`
I'm currently doing this with scripts, but I wanted a GUI to select the wallpaper.
Hmm, that's interesting, thank you, I'll take a look then. But, this will only work on hyprland, right? I mean only when `hyprctl` is available?
> Hmm, that's interesting, thank you, I'll take a look then. But, this will only work on hyprland, right? I mean only when `hyprctl` is available?
Perhaps no one uses hyprpaper in other window managers or desktop environments.
I tested it a bit, and so far I see a few problems:
1) `hyprpaper` does not start without a config file and it does not work without a proper (non-empty) config file. So the user would have to configure it with at least one wallpaper before using `waypaper`. I'd much prefer if hyprpaper could start the daemon without a config file, but I guess we can live with that.
2) I was unable to run it without specifying a monitor `DP-1`. @mindstormjak mentioned that you can set wallpaper for all monitors, but I tried many versions of the command and only `hyprctl hyprpaper wallpaper DP-1,/path/to/wallpaper` worked for me. Am I missing something?
3) It's less important, but `hyprpaper` does not seem to support either fitting options (fill, fit, etc.) or background color like other backends. So again the experience is somewhat inferior.
If anyone has any comments, especially about first two issues, please :)
I'm also waiting for it, you could try to ask the devs of hyprland maybe they can implement it or make a pull request yourself.
It is possible, of course, to work with `hyprpaper` to make it more usable, but may I ask everybody, is there any specific reason you prefer to use `hyprpaper` and not `swww` or `swaybg`? I mean, it doesn't even ship with Hyprland, you need to install it separately, so why not simply install `swww`, which has support for many things and works pretty well. Is it about the per-workspace wallpapers?
It may well be because of the name; it seems like the package was supposed to be used on Hyprland, and everybody seems willing to spend their time trying to figure out a way to use it with hyprpaper rather than just switch to one of those. Who will understand the human being!
Thanks to @nikolaizombie1, preliminary work to make `hyprpaper` work is done. I think the version in main should now support setting wallpapers. Here are a few remaining problems:
1. Selection of monitor is currently available only if `swww` is installed, because it's done via `swww-daemon`. Need to make it independent, perhaps via `hyprctl` (see the sketch after this list)
2. As I understand, fill types, background colors etc are not supported by `hyprpaper`. Need to remove those buttons when this backend is selected.
3. Creating a config file is needed for hyprpaper to work, so maybe there could be a popup message about it or something like that.
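For illustration, a minimal sketch of how the monitor names could be queried via `hyprctl` (this mirrors the approach the final patch takes; the exact `hyprctl monitors` output format is assumed from that patch):

```python
import re
import subprocess

def list_monitor_names():
    """Return the names of connected monitors as reported by hyprctl."""
    # Assumes each monitor block starts with a line like "Monitor eDP-1 (ID 0):".
    output = subprocess.check_output(["hyprctl", "monitors"], encoding="utf-8")
    names = []
    for line in output.split("\n"):
        match = re.match(r"Monitor (\S+) \(ID \d+\):", line)
        if match:
            names.append(match.group(1))
    return names
```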
Hello, and thank you for accepting my pull request, @anufrievroman.
> 1. Selection of monitor is currently available only if `swww` is installed, because it's done via `swww-daemon`. Need to make it independent, perhaps via `hyprctl`
I just created a new pull request separating the `hyprpaper` backend from `swww`.
> 2. As I understand, fill types, background colors etc are not supported by `hyprpaper`. Need to remove those buttons when this backend is selected.
This is correct. hyprpaper only supports display names and images, no fill options or colors.
> 3. Creating a config file is needed for hyprpaper to work, so maybe there could be a popup message about it or something like that.
This isn't strictly necessary. Although the `hyprpaper` documentation does not explicitly mention using `hyprpaper` without `hyprpaper.conf`, I tested this on my Arch installation and it worked just fine. `hyprpaper.conf` is only there to set the wallpapers without invoking `hyprctl hyprpaper` directly.
I see, okay. Thank you very much for PR, sorry I wasn't able to check it today, I'll try to get to it as soon as I can.
It's all good. You can check on them whenever you can.
Also, I did some more research and there are some options that cannot be set using `hyprctl hyprpaper`, those being
1. splash
2. splash_offset
3. splash_color
4. ipc
At the very least, a note should be added that these options are only available in the config file.
Thank you again for the latest commits, it seems to work, but for me it does not really work "out of the box", I'm getting some hyprpaper error and I wonder if that just lacks this library, or it's something else:
```
hyprpaper: /usr/lib/libstdc++.so.6: version `GLIBCXX_3.4.32' not found (required by hyprpaper)
hyprpaper: /usr/lib/libc.so.6: version `GLIBC_2.38' not found (required by hyprpaper)
hyprpaper: /usr/lib/libc.so.6: version `GLIBC_2.38' not found (required by /usr/lib/libhyprlang.so.2)
Couldn't connect to /tmp/hypr/f27873a6f06dc2f87600edb890f3c38298bfb55f_1716422609/.hyprpaper.sock. (3)
Couldn't connect to /tmp/hypr/f27873a6f06dc2f87600edb890f3c38298bfb55f_1716422609/.hyprpaper.sock. (3)
```
That's odd. What distribution are you using to run hyprland and hyprpaper? I did my testing on Arch and it worked fine. I'm going to test it on Nixos tomorrow and see if I can replicate the error.
Actually it turned out to be a global problem on my system, so disregard.
So, now the `hyprpaper` works for me, but not really :D
If I naively run `waypaper` it doesn't change the wallpaper due to:
```
Couldn't connect to /tmp/hypr/84ab8d11e8951a6551d1e1bf87796a8589da6d47_1716165482/.hyprpaper.sock. (3)
Couldn't connect to /tmp/hypr/84ab8d11e8951a6551d1e1bf87796a8589da6d47_1716165482/.hyprpaper.sock. (3)
```
which I suppose is a [common issue](https://github.com/hyprwm/hyprpaper/issues/5) with `hyprpaper`.
I investigated and if I create a proper config of `hyprpaper` it launches and sets the wallpaper, but it reports that:
```
[ERR] Couldn't listen on the hyprpaper Socket. (3) IPC will not work.
```
So, without IPC, changing the wallpaper via `hyprctl` doesn't work, so `waypaper` doesn't work. I understand that it's my personal problem with `hyprpaper` at this point, but it would be nice to know what the reason is and maybe mention it in the documentation before we officially release `hyprpaper` support.
On a side note, I noticed that while `hyprpaper` is running, if we change the backend to `swww`, we can't set the wallpaper (or it remains behind the hyprpaper one), so we will need to add `killall hyprpaper` to the swww part of `changer.py`, similar to how we deal with killing swaybg.
> Thank you again for the latest commits, it seems to work, but for me it does not really work "out of the box", I'm getting some hyprpaper error and I wonder if that just lacks this library, or it's something else:
>
> ```
> hyprpaper: /usr/lib/libstdc++.so.6: version `GLIBCXX_3.4.32' not found (required by hyprpaper)
> hyprpaper: /usr/lib/libc.so.6: version `GLIBC_2.38' not found (required by hyprpaper)
> hyprpaper: /usr/lib/libc.so.6: version `GLIBC_2.38' not found (required by /usr/lib/libhyprlang.so.2)
> Couldn't connect to /tmp/hypr/f27873a6f06dc2f87600edb890f3c38298bfb55f_1716422609/.hyprpaper.sock. (3)
> Couldn't connect to /tmp/hypr/f27873a6f06dc2f87600edb890f3c38298bfb55f_1716422609/.hyprpaper.sock. (3)
> ```
This would be great to document in a troubleshooting section of the README. A user might encounter the same problem and think that `waypaper` is defective.
> So, without IPC, the changing wallpaper vie `hyprctl` doesn't work, so `waypaper` doesn't work. I understand that it's my personal problem with `hyprpaper` at this point, but would be nice to know what is the reason and maybe mention it in the documentation before we officially release `hyprpaper` support.
I see, having inter process communication (IPC) enabled is required for `hyprctl` to communicate with `hyprpaper`. Having a troubleshooting section in the README to ensure that IPC enabled when using the `hyprpaper` backend would allow users to manually enable the feature without forcing a config file that we would choose.
> On the sidenote, I noticed that while `hyprpaper` is running, if we change backend to `swww`, we can't set the wallpaper (or it remains behind the hyprpaper one), so will need add `killall hyprpaper` to the swww part of the `changer.py`, similar to how we deal with killing swaybg.
I will quickly implement this fix as well.
Also, if I understood correctly, currently, we are preloading and setting wallpapers but never unloading them, so the hyprpaper just keeps them in RAM. I guess after setting new wallpaper we can try to unload previous wallpaper for the given monitor. Is it just `hyprctl hyprpaper unload all`?
I did the `killall hyprpaper` fix as well as options hiding on `hyprpaper` and unloading old wallpapers.
Still, I have [this bug](https://github.com/hyprwm/Hyprland/issues/642), so I can't really test `hyprpaper` backend. Please report if that work for you. Would be nice to know that it works for many people before we officially release this feature.
Edit: Personally, I always have [this issue with hyprpaper](https://github.com/hyprwm/hyprpaper/issues/170). Supposed to be fixed in the next version.
> I did the `killall hyprpaper` fix as well as options hiding on `hyprpaper` and unloading old wallpapers. Still, I have [this bug](https://github.com/hyprwm/Hyprland/issues/642), so I can't really test `hyprpaper` backend. Please report if that work for you. Would be nice to know that it works for many people before we officially release this feature.
Will do.
Finally, I made it work (with all the updates of all systems), but the only remaining issue that I experience is that the wallpaper changes only if I select the monitor (eDP-1), while if I keep it "All" the wallpaper does not change. Does it work for you?
In the code everything seems fine, we set it as "" if monitor is "All", so it should work...
Edit: I tested and basically command `hyprctl hyprpaper wallpaper ",~/image.jpg"` simply doesn't work ([like that](https://github.com/hyprwm/hyprpaper/issues/122)), only with specific monitor name. Is it just on my system or someone else can confirm that?
> Finally, I made it work (with all the updates of all systems), but the only remaining issue that I experience is that the wallpaper changes only if I select the monitor (eDP-1), while if I keep it "All" the wall paper does not change. Does it work for you?
As far as I remember it did function correctly but I'll check first thing tomorrow and see if it works as intended.
I did more testing on another machine and with multiple monitors. Basically, sometimes it works, sometimes it fails randomly. I think at least part of the problem is that I often get an IPC message that:
```
wallpaper failed (not preloaded)
```
This probably means that it does not have enough time to preload. So, we should either increase the sleep time, or even better, somehow make sure that it's preloaded before requesting the change. Maybe we can simply sequence the preload `&&` change commands? | 2024-05-31T10:40:46 | 0.0 | [] | []
||
ivre/ivre | ivre__ivre-1583 | 5a2e645f349fc5d2e220356abea5c9d4e57099eb | diff --git a/ivre/tools/version.py b/ivre/tools/version.py
index b0efa95bb7..eed1152b19 100644
--- a/ivre/tools/version.py
+++ b/ivre/tools/version.py
@@ -42,9 +42,12 @@ def get_version(module: str) -> Optional[str]:
return None
for attr in ["__version__", "VERSION", "version"]:
try:
- return str(getattr(mod, attr))
+ data = getattr(mod, attr)
except AttributeError:
- pass
+ continue
+ if isinstance(data, tuple):
+ return ".".join(str(value) for value in data)
+ return str(data)
return "[unknown version]"
@@ -95,6 +98,8 @@ def main() -> None:
"pymongo",
"sqlalchemy",
"tinydb",
+ "elasticsearch",
+ "elasticsearch_dsl",
]:
version = get_version(module)
if version is None:
diff --git a/requirements-all.txt b/requirements-all.txt
index adb5677a25..ede8d5c513 100644
--- a/requirements-all.txt
+++ b/requirements-all.txt
@@ -4,7 +4,7 @@ bottle
codespell
cryptography
docutils!=0.18
-elasticsearch
+elasticsearch<8
elasticsearch-dsl
flake8
mypy
diff --git a/requirements-elastic.txt b/requirements-elastic.txt
index 0f5a12ec91..820f911b5c 100644
--- a/requirements-elastic.txt
+++ b/requirements-elastic.txt
@@ -1,4 +1,4 @@
-elasticsearch
+elasticsearch<8
elasticsearch-dsl
cryptography
pyOpenSSL>=16.1.0
diff --git a/setup.py b/setup.py
index c0bbc14502..511269c374 100755
--- a/setup.py
+++ b/setup.py
@@ -171,7 +171,7 @@ def _write_pkg_file(self, file):
"MongoDB mongo+srv URIs": ["pymongo[srv]"],
"TinyDB (experimental)": ["tinydb"],
"PostgreSQL (experimental)": ["sqlalchemy", "psycopg2"],
- "Elasticsearch (experimental)": ["elasticsearch", "elasticsearch-dsl"],
+ "Elasticsearch (experimental)": ["elasticsearch<8", "elasticsearch-dsl"],
"GSSAPI authentication for MongoDB": ["python-krbV"],
"GSSAPI authentication for HTTP": ["pycurl"],
"Screenshots": ["PIL"],
| DB/Elastic: fix URLs (use elastics:// for TLS)
| 2023-09-20T20:01:27 | 0.0 | [] | [] |
|||
carpentriesoffline/offlinedatasci | carpentriesoffline__offlinedatasci-52 | 05e07e2b04e4fc358e93e58fcc397ce89717f8db | diff --git a/offlinedatasci/main.py b/offlinedatasci/main.py
index 17f2644..0b335e6 100644
--- a/offlinedatasci/main.py
+++ b/offlinedatasci/main.py
@@ -219,14 +219,16 @@ def download_python_libraries(ods_dir,py_library_reqs = [ "matplotlib", "noteboo
'pip': 'pip3',
'dest': download_dir,
'pkgs': py_library_reqs,
- 'python_version': '3.9.6'
+ 'python_version': '3.11',
+ 'allow_binary': True
}
- pypi_mirror.download(platform = ['manylinux1_x86_64'], **parameters)
- pypi_mirror.download(platform = ['macosx_10_10_x86_64'], **parameters)
+ pypi_mirror.download(platform = ['manylinux_2_17_x86_64'], **parameters)
+ pypi_mirror.download(platform = ['macosx_10_12_x86_64'], **parameters)
pypi_mirror.download(platform = ['win_amd64'], **parameters)
mirror_creation_parameters = {
'download_dir': download_dir,
- 'mirror_dir': pypi_dir
+ 'mirror_dir': pypi_dir,
+ 'copy': True
}
pypi_mirror.create_mirror(**mirror_creation_parameters)
| local pypi mirror not currently working
Command line version that works for pandas, numpy, and matplotlib:
```bash
pypi-mirror download -b -d pythonlibraries pandas numpy matplotlib
pypi-mirror create -d pythonlibraries -m pypi -c
```
The `-b` flag allows binary downloads and is critical for dependency handling. The `-c` flag copies files from the download directory into the mirror directory instead of symlinking them, which appears to be needed for running pip on a local directory (instead of via a web server)
| 2023-02-11T17:25:57 | 0.0 | [] | [] |
|||
pyannote/pyannote-audio | pyannote__pyannote-audio-1730 | c718d9863e9aa89a1bce096303583ffab6ddd5fe | diff --git a/CHANGELOG.md b/CHANGELOG.md
index e66294794..b3f6fb850 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,12 @@
# Changelog
+## develop
+
+### Fixes
+
+- fix: fix clipping issue in speech separation pipeline ([@joonaskalda](https://github.com/joonaskalda/))
+
+
## Version 3.3.2 (2024-09-11)
### Fixes
diff --git a/pyannote/audio/pipelines/speech_separation.py b/pyannote/audio/pipelines/speech_separation.py
index c1b9b036c..43c5b8a44 100644
--- a/pyannote/audio/pipelines/speech_separation.py
+++ b/pyannote/audio/pipelines/speech_separation.py
@@ -654,6 +654,12 @@ def apply(
sources.data * discrete_diarization.align(sources).data[:, :num_sources]
)
+ # separated sources might be scaled up/down due to SI-SDR loss used when training
+ # so we peak-normalize them
+ sources.data = sources.data / np.max(
+ np.abs(sources.data), axis=0, keepdims=True
+ )
+
# convert to continuous diarization
diarization = self.to_annotation(
discrete_diarization,
| outputs of separation module is clipping
### Tested versions
- 3.3
### System information
macOS, m1
### Issue description
Hi @hbredin, @joonaskalda thanks for this great release!
I tried some examples on the new PixIT pipeline and I find that the outputs of the separation module produce a very high level of clipping. Is this to be expected from the way it was trained with scale-invariant losses?
Input was a downsampled 16khz mono wav file from the youtube excerpt linked below.
<img width="1926" alt="image" src="https://github.com/pyannote/pyannote-audio/assets/72940/eb86db80-d0c6-4f9e-96ca-6b8a30d36993">
### Minimal reproduction example (MRE)
https://www.youtube.com/watch?v=CGUpPyA48jE&t=182s
```python
# instantiate the pipeline
from pyannote.audio import Pipeline
pipeline = Pipeline.from_pretrained(
"pyannote/speech-separation-ami-1.0",
use_auth_token="HUGGINGFACE_ACCESS_TOKEN_GOES_HERE")
# run the pipeline on an audio file
diarization, sources = pipeline("audio.wav")
# dump the diarization output to disk using RTTM format
with open("audio.rttm", "w") as rttm:
diarization.write_rttm(rttm)
# dump sources to disk as SPEAKER_XX.wav files
import scipy.io.wavfile
for s, speaker in enumerate(diarization.labels()):
scipy.io.wavfile.write(f'{speaker}.wav', 16000, sources.data[:,s])
```
| Hi @faroit, thank you for your interest in PixIT! I suspect the issue is that the current version is trained only on the AMI meeting dataset. On the AMI test set this hasn't been an issue. Finetuning on domain-specific audio would likely improve the separation performance.
@joonaskalda thanks for your reply. I am not sure if fine-tuning would really be able to fix any of this.
I dug a bit deeper and saw that the maximum output after separation is about `81.0` in that example. Also interesting is that it drifts in terms of bias. Here is the peak-normalized output of speaker 1
<img width="1290" alt="image" src="https://github.com/pyannote/pyannote-audio/assets/72940/9280a778-cd6c-48d9-8b73-87b04df85c31">
Was the model trained on zero-mean, unit variance data?
Thanks for investigating. I checked and the separated sources are (massively) scaled up for AMI data too. I never noticed because I've peak-normalized them before use. The scale-invariant loss is indeed the likely culprit.
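For reference, the peak normalization that the patch above adds boils down to a single numpy operation on the `(num_samples, num_sources)`-shaped `sources.data` array:

```python
import numpy as np

# Undo the arbitrary per-source scaling introduced by the SI-SDR training loss
# by dividing each separated source by its absolute peak.
sources.data = sources.data / np.max(np.abs(sources.data), axis=0, keepdims=True)
```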
The training data was not normalized to zero mean and unit variance.
@joonaskalda thanks for the update. Maybe you can add a normalization to the pipeline so that users that aren't familiar with SI-SDR trained models aren't surprised | 2024-06-21T15:25:30 | 0.0 | [] | [] |
||
AllenCellModeling/napari-aicsimageio | AllenCellModeling__napari-aicsimageio-19 | 3dde95793864c7a77b8566d41cd87f563faa67d3 | diff --git a/napari_aicsimageio/core.py b/napari_aicsimageio/core.py
index f781208..173f6c2 100644
--- a/napari_aicsimageio/core.py
+++ b/napari_aicsimageio/core.py
@@ -41,13 +41,6 @@ def _get_full_image_data(img: AICSImage, in_memory: bool) -> Optional[xr.DataArr
return None
-def _get_meta(img: AICSImage) -> Any:
- """
- This return type should change in the future to always return OME from ome-types.
- """
- return img.metadata
-
-
def reader_function(
path: PathLike, in_memory: bool, scene_name: Optional[str] = None
) -> Optional[List[LayerData]]:
@@ -92,8 +85,7 @@ def reader_function(
meta["rgb"] = True
# Apply all other metadata
- meta_reader = partial(_get_meta, img=img)
- meta["metadata"] = {"ome_types": meta_reader}
+ meta["metadata"] = {"ome_types": img.metadata}
return [(data.data, meta, "image")]
| Handle `ome-types` plugin changes
`ome-types` should be a `Dict` instead of a `Callable`. This decision was made after recent bug fixes and community discussions.
See code changes upstream here: https://github.com/tlambert03/ome-types/blob/master/src/ome_types/widgets.py#L60
| 2021-07-16T02:50:21 | 0.0 | [] | [] |
|||
adafruit/Adafruit_CircuitPython_SI5351 | adafruit__Adafruit_CircuitPython_SI5351-21 | 9a7123b6186fbef6d288400ee03303fed97b89d9 | diff --git a/adafruit_si5351.py b/adafruit_si5351.py
index 3133488..79cd7b6 100644
--- a/adafruit_si5351.py
+++ b/adafruit_si5351.py
@@ -169,10 +169,11 @@ def _configure_registers(self, p1, p2, p3):
self._si5351._write_u8(_SI5351_REGISTER_177_PLL_RESET, (1 << 7) | (1 << 5))
def configure_integer(self, multiplier):
- """Configure the PLL with a simple integer mulitplier for the most
+ """Configure the PLL with a simple integer multiplier for the most
accurate (but more limited) PLL frequency generation.
"""
- assert 14 < multiplier < 91
+ if multiplier >= 91 or multiplier <= 14:
+ raise Exception("Multiplier must be in range 14 to 91.")
multiplier = int(multiplier)
# Compute register values and configure them.
p1 = 128 * multiplier - 512
@@ -192,9 +193,14 @@ def configure_fractional(self, multiplier, numerator, denominator):
multiplier and numerator/denominator. This is less accurate and
susceptible to jitter but allows a larger range of PLL frequencies.
"""
- assert 14 < multiplier < 91
- assert 0 < denominator <= 0xFFFFF # Prevent divide by zero.
- assert 0 <= numerator < 0xFFFFF
+ if multiplier >= 91 or multiplier <= 14:
+ raise Exception("Multiplier must be in range 14 to 91.")
+ if denominator > 0xFFFFF or denominator <= 0: # Prevent divide by zero.
+ raise Exception(
+ "Denominator must be greater than 0 and less than 0xFFFFF."
+ )
+ if numerator >= 0xFFFFF or numerator < 0:
+ raise Exception("Numerator must be in range 0 to 0xFFFFF.")
multiplier = int(multiplier)
numerator = int(numerator)
denominator = int(denominator)
@@ -279,7 +285,8 @@ def r_divider(self):
@r_divider.setter
def r_divider(self, divider):
- assert 0 <= divider <= 7
+ if divider > 7 or divider < 0:
+ raise Exception("Divider must in range 0 to 7.")
reg_value = self._si5351._read_u8(self._r)
reg_value &= 0x0F
divider &= 0x07
@@ -306,10 +313,12 @@ def configure_integer(self, pll, divider):
divider. This is the most accurate way to set the clock output
frequency but supports less of a range of values.
"""
- assert 3 < divider < 2049
+ if divider >= 2049 or divider <= 3:
+ raise Exception("Divider must be in range 3 to 2049.")
divider = int(divider)
# Make sure the PLL is configured (has a frequency set).
- assert pll.frequency is not None
+ if pll.frequency is None:
+ raise Exception("PLL must be configured.")
# Compute MSx register values.
p1 = 128 * divider - 512
p2 = 0
@@ -331,14 +340,20 @@ def configure_fractional(self, pll, divider, numerator, denominator):
fractional divider with numerator/denominator. Again this is less
accurate but has a wider range of output frequencies.
"""
- assert 3 < divider < 2049
- assert 0 < denominator <= 0xFFFFF # Prevent divide by zero.
- assert 0 <= numerator < 0xFFFFF
+ if divider >= 2049 or divider <= 3:
+ raise Exception("Divider must be in range 3 to 2049.")
+ if denominator > 0xFFFFF or denominator <= 0: # Prevent divide by zero.
+ raise Exception(
+ "Denominator must be greater than 0 and less than 0xFFFFF."
+ )
+ if numerator >= 0xFFFFF or numerator < 0:
+ raise Exception("Numerator must be in range 0 to 0xFFFFF.")
divider = int(divider)
numerator = int(numerator)
denominator = int(denominator)
# Make sure the PLL is configured (has a frequency set).
- assert pll.frequency is not None
+ if pll.frequency is None:
+ raise Exception("PLL must be configured.")
# Compute MSx register values.
p1 = int(128 * divider + math.floor(128 * (numerator / denominator)) - 512)
p2 = int(
| Change asserts to exceptions
Not urgent. Just noticed this lib is still using asserts.
https://circuitpython.readthedocs.io/en/latest/docs/design_guide.html#exceptions-and-asserts
| @caternuson just for my info, what is the goal here, to use exceptions instead of assert in the library? Thanks :)
Yep. See linked design guide info. Other CP libs could also be good reference. In general, this is a trivial change. I'll add a good first issue tag also FWIW.
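For illustration, the change boils down to replacing each assert with an explicit check and raise, e.g. (pattern taken from the patch; a more specific exception type such as `ValueError` would also be an option):

```python
# Before: assert-based validation (terse errors, can be optimized away)
assert 14 < multiplier < 91

# After: explicit exception, as done in the patch
if multiplier >= 91 or multiplier <= 14:
    raise Exception("Multiplier must be in range 14 to 91.")
```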
will do, thanks :) | 2021-04-25T02:21:30 | 0.0 | [] | [] |
||
LorenFrankLab/spyglass | LorenFrankLab__spyglass-882 | b7a2986f3368ecc79651c3d3908a3b92062698b1 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9a59e87da..b4a6406f4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,7 @@
## [0.5.2] (Unreleased)
- Refactor `TableChain` to include `_searched` attribute. #867
+- Fix errors in config import #882
## [0.5.1] (March 7, 2024)
diff --git a/dj_local_conf_example.json b/dj_local_conf_example.json
index defdea4d7..b9b5e725e 100644
--- a/dj_local_conf_example.json
+++ b/dj_local_conf_example.json
@@ -41,7 +41,7 @@
"video": "/your/base/path/video"
},
"kachery_dirs": {
- "cloud": "/your/base/path/kachery_storage",
+ "cloud": "/your/base/path/.kachery-cloud",
"storage": "/your/base/path/kachery_storage",
"temp": "/your/base/path/tmp"
},
diff --git a/src/spyglass/settings.py b/src/spyglass/settings.py
index 202ac33fb..af16e688d 100644
--- a/src/spyglass/settings.py
+++ b/src/spyglass/settings.py
@@ -70,7 +70,7 @@ def __init__(self, base_dir: str = None, **kwargs):
"video": "video",
},
"kachery": {
- "cloud": "kachery_storage",
+ "cloud": ".kachery-cloud",
"storage": "kachery_storage",
"temp": "tmp",
},
@@ -181,10 +181,13 @@ def load_config(
else None
)
+ source_config = (
+ dj_dlc
+ if prefix == "dlc"
+ else dj_kachery if prefix == "kachery" else dj_spyglass
+ )
dir_location = (
- dj_spyglass.get(dir)
- or dj_kachery.get(dir)
- or dj_dlc.get(dir)
+ source_config.get(dir)
or env_loc
or str(Path(this_base) / dir_str)
).replace('"', "")
| Spyglass Config Bugs
**Describe the bug**
Error when same relative directory name in multiple base directories:
- the sub-directory `video` occurs in both `spyglass_base` and `dlc_base`
- [This line](https://github.com/LorenFrankLab/spyglass/blob/440601f9d9658ce6dbc5d283af79a6136e3db606/src/spyglass/settings.py#L184) prioritizes taking from the spyglass section of the datajoint config regardless of what the prefix is (see the sketch below)
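A minimal sketch of one way to make the lookup prefix-aware (this mirrors the approach in the fix; `dj_spyglass`, `dj_kachery`, `dj_dlc`, `prefix`, `env_loc`, `this_base` and `dir_str` are the variables already present in `load_config`):

```python
# Pick the config section that matches the directory prefix instead of
# always consulting the spyglass section first.
source_config = (
    dj_dlc
    if prefix == "dlc"
    else dj_kachery if prefix == "kachery" else dj_spyglass
)
dir_location = source_config.get(dir) or env_loc or str(Path(this_base) / dir_str)
```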
Error in kachery cloud dir:
- I believe [this](https://github.com/LorenFrankLab/spyglass/blob/440601f9d9658ce6dbc5d283af79a6136e3db606/src/spyglass/settings.py#L73) should be `kachery_cloud`
- Will also need to be changed in the example local config
| 2024-03-21T17:34:58 | 0.0 | [] | [] |
|||
getmail6/getmail6 | getmail6__getmail6-63 | 571b1432a7cc91a24b0b1eef2c9904f8d473b7f3 | diff --git a/getmail b/getmail
index 98f5511..ae7f7e2 100755
--- a/getmail
+++ b/getmail
@@ -163,6 +163,8 @@ def go(configs, idle):
syslog.syslog(syslog.LOG_INFO, logline)
retriever.initialize(options)
destination.retriever_info(retriever)
+ # session ready for idling
+ idling = idle
for mailbox in retriever.mailboxes:
if mailbox:
@@ -206,13 +208,20 @@ def go(configs, idle):
try:
msg = retriever.getmsg(msgid)
except getmailRetrievalError as o:
+ # Check if xoauth2 token was expired
+ # (Exchange Online only)
+ if 'AccessTokenExpired' in str(o):
+ log.warn('Retrieval error: %s\n' % o)
+ idling = False
+ break
errorexit = True
log.error(
- 'Retrieval error: server for %s is broken; '
+ 'Retrieval error: %s\n'
+ 'Server for %s is broken; '
'offered message %s but failed to provide it. '
'Please notify the administrator of the '
'server. Skipping message...\n'
- % (retriever, msgid)
+ % (o, retriever, msgid)
)
continue
msgs_retrieved += 1
@@ -436,9 +445,16 @@ def go(configs, idle):
# what we want.
# Expunge and close the mailbox to prevent the same messages
# being pulled again in some configurations.
- retriever.close_mailbox()
try:
- idling = retriever.go_idle(idle)
+ retriever.close_mailbox()
+ except imaplib.IMAP4.abort as o:
+ # Treat "abort" exception as temporary failure
+ log.info('%s: session aborted during close_mailbox (%s)\n'
+ % (configfile, o))
+ idling = False
+ try:
+ if idling:
+ idling = retriever.go_idle(idle)
# Returned from idle
retriever.set_new_timestamp()
configs.append(configs[0])
diff --git a/getmailcore/_retrieverbases.py b/getmailcore/_retrieverbases.py
index df0c5de..7dd5ee1 100755
--- a/getmailcore/_retrieverbases.py
+++ b/getmailcore/_retrieverbases.py
@@ -1758,6 +1758,10 @@ def go_idle(self, folder, timeout=300):
self.conn._command_complete('IDLE', tag)
except imaplib.IMAP4.error as o:
return False
+ except BrokenPipeError as o:
+ # The underlying TLS connection closed during IDLE
+ self.log.info('BrokenPipeError after IDLE\n')
+ return False
if aborted:
raise aborted
| Unhandled exceptions with IMAP IDLE + OAuth2.0
My configuration:
```
% getmail -iINBOX --dump
getmail version 6.12
Copyright (C) 1998-2020 Charles Cazabon and others. Licensed under GNU GPL version 2.
getmail configuration:
getmail version 6.12
Python version 3.9.1 (default, Dec 12 2020, 09:45:22)
[Clang 8.0.1 (tags/RELEASE_801/final 366581)]
retriever: SimpleIMAPSSLRetriever(ca_certs="None", certfile="None", getmaildir="/home/kasahara/.getmail", keyfile="None", mailboxes="('INBOX',)", move_on_delete="None", password="*", password_command="('getmail-gmail-xoauth-tokens', '/home/kasahara/.getmail/exchange.json')", port="993", record_mailbox="True", server="outlook.office365.com", ssl_cert_hostname="None", ssl_ciphers="None", ssl_fingerprints="()", ssl_version="None", timeout="180", use_cram_md5="False", use_kerberos="False", use_peek="True", use_xoauth2="True", username="(hidden)")
destination: MDA_external(allow_root_commands="False", arguments="()", command="filter", group="None", ignore_stderr="False", path="/usr/local/lib/mailagent/filter", unixfrom="True", user="None")
options:
delete : False
delete_after : 0
delete_bigger_than : 0
delivered_to : False
fingerprint : False
logfile : logfile(filename="~/.getmail/getmail.log")
max_bytes_per_session : 0
max_message_size : 0
max_messages_per_session : 0
message_log : ~/.getmail/getmail.log
message_log_syslog : True
message_log_verbose : False
read_all : False
received : True
verbose : 1
```
While fetching messages from Exchange Online with IMAP IDLE + OAuth2.0, I occasionally got unhandled exceptions.
```
IDLE message received
msg 10023/10023 (266372 bytes) delivered
1 messages (266372 bytes) retrieved, 10022 skipped from SimpleIMAPSSLRetriever:(hidden)@outlook.office365.com:993
0 messages (0 bytes) retrieved, 10023 skipped from SimpleIMAPSSLRetriever:(hidden)@[email protected]:993
Exception: please read docs/BUGS and include the following information in any bug report:
getmail version 6.12
Python version 3.9.1 (default, Dec 12 2020, 09:45:22)
[Clang 8.0.1 (tags/RELEASE_801/final 366581)]
Unhandled exception follows:
File "/usr/local/bin/getmail", line 916, in main
success = go(configs, options.idle)
File "/usr/local/bin/getmail", line 439, in go
retriever.close_mailbox()
File "/usr/local/lib/python3.9/site-packages/getmailcore/_retrieverbases.py", line 1359, in close_mailbox
self.conn.close()
File "/usr/local/lib/python3.9/imaplib.py", line 475, in close
typ, dat = self._simple_command('CLOSE')
File "/usr/local/lib/python3.9/imaplib.py", line 1230, in _simple_command
return self._command_complete(name, self._command(name, *args))
File "/usr/local/lib/python3.9/imaplib.py", line 1049, in _command_complete
raise self.abort('command: %s => %s' % (name, val))
imaplib.IMAP4.abort: command: CLOSE => Session invalidated - AccessTokenExpired
Please also include configuration information from running getmail
with your normal options plus "--dump".
```
```
IDLE message received
Exception: please read docs/BUGS and include the following information in any bug report:
getmail version 6.12
Python version 3.9.1 (default, Dec 12 2020, 09:45:22)
[Clang 8.0.1 (tags/RELEASE_801/final 366581)]
Unhandled exception follows:
File "/usr/local/bin/getmail", line 916, in main
success = go(configs, options.idle)
File "/usr/local/bin/getmail", line 441, in go
idling = retriever.go_idle(idle)
File "/usr/local/lib/python3.9/site-packages/getmailcore/_retrieverbases.py", line 1757, in go_idle
self.conn.send(b'DONE\r\n')
File "/usr/local/lib/python3.9/imaplib.py", line 332, in send
self.sock.sendall(data)
File "/usr/local/lib/python3.9/ssl.py", line 1204, in sendall
v = self.send(byte_view[count:])
File "/usr/local/lib/python3.9/ssl.py", line 1173, in send
return self._sslobj.write(data)
BrokenPipeError: [Errno 32] Broken pipe
Please also include configuration information from running getmail
with your normal options plus "--dump".
```
It seems that the server invalidates the session due to token expiration where `getmail6` doesn't expect it.
Also I got this once.
```
IDLE message received
Retrieval error: server for SimpleIMAPSSLRetriever:(hidden)@outlook.office365.com:993 is broken; offered message 14/238306 but failed to provide it. Please notify the administrator of the server. Skipping message...
0 messages (0 bytes) retrieved, 9870 skipped
IMAP error during logout (Session invalidated - AccessTokenExpired)
```
I'm afraid that I don't know how to handle these exceptions properly in these contexts. I guess it is ok to catch these exceptions and re-initialize the retriever to go idle again.
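For the second traceback, one way to guard the IDLE `DONE` write inside `go_idle` is sketched below; this is also how the patch above handles it (`tag`, `self.conn` and `self.log` come from the surrounding method, and `imaplib` is already imported in that module):

```python
try:
    self.conn.send(b'DONE\r\n')
    self.conn._command_complete('IDLE', tag)
except imaplib.IMAP4.error:
    return False
except BrokenPipeError:
    # The underlying TLS connection was closed while idling.
    self.log.info('BrokenPipeError after IDLE\n')
    return False
```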
| You use v6.12. Isn't that solved with your last change, which is now in v6.13?
I'm sorry about the ambiguity of my version. The version number shown was 6.12, but my fix was incorporated.
The previous fix was for the (re-)authentication after the session was invalidated. On the other hand, these exceptions seemed to occur when the session was invalidated and `getmail6` tried to send something to the invalid session.
For example, the first exception occurred around here:
```
getmail:
420 if idle and not errorexit:
(comment omitted)
439 retriever.close_mailbox() # <----- HERE
440 try:
441 idling = retriever.go_idle(idle)
```
```
getmailcore/_retrieverbases.py:
1353 def close_mailbox(self):
1354 # Close current mailbox so deleted mail is expunged. One getmail
1355 # user had a buggy IMAP server that didn't do the automatic expunge,
1356 # so we do it explicitly here if we've deleted any messages.
1357 if self.deleted:
1358 self.conn.expunge()
1359 self.conn.close() # <----- HERE
1360 self.write_oldmailfile(self.mailbox_selected)
```
So I think the session was invalidated just before `close_mailbox()` after the mailbox was successfully checked. It seems that when `getmail6` tried to send "CLOSE" to the server, the server had already invalidated the session and returned an error, so the exception `imaplib.IMAP4.abort` was raised. Maybe we can skip idling and start over in this case.
> Maybe we can skip idling and start over in this case.
According to the comments at `getmail:421`, IDLE handling is not ideal and it does a reconnect anyway. Maybe just catching the specific exception is a solution.
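A minimal sketch of catching it around `close_mailbox()`, essentially what the patch above does (`retriever`, `log`, `configfile` and `idling` come from the surrounding `go()` function):

```python
import imaplib

try:
    retriever.close_mailbox()
except imaplib.IMAP4.abort as o:
    # Treat an aborted session (e.g. "Session invalidated - AccessTokenExpired")
    # as a temporary failure: skip idling and reconnect on the next pass.
    log.info('%s: session aborted during close_mailbox (%s)\n' % (configfile, o))
    idling = False
```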
Could you try it, test it, and make a pull request if it works for you?
Ok, I'll try to fix and make a pull request. It will take time to test, so please wait for a while...
| 2021-01-13T05:10:43 | 0.0 | [] | [] |
||
spcl/graph-of-thoughts | spcl__graph-of-thoughts-18 | 8f1e6ce81de732ccf9856d9270eda92df2d655dc | diff --git a/graph_of_thoughts/language_models/abstract_language_model.py b/graph_of_thoughts/language_models/abstract_language_model.py
index a066eaf..cead63c 100644
--- a/graph_of_thoughts/language_models/abstract_language_model.py
+++ b/graph_of_thoughts/language_models/abstract_language_model.py
@@ -80,12 +80,12 @@ def query(self, query: str, num_responses: int = 1) -> Any:
pass
@abstractmethod
- def get_response_texts(self, query_responses: Union[List[Dict], Dict]) -> List[str]:
+ def get_response_texts(self, query_responses: Union[List[Any], Any]) -> List[str]:
"""
Abstract method to extract response texts from the language model's response(s).
:param query_responses: The responses returned from the language model.
- :type query_responses: Union[List[Dict], Dict]
+ :type query_responses: Union[List[Any], Any]
:return: List of textual responses.
:rtype: List[str]
"""
diff --git a/graph_of_thoughts/language_models/chatgpt.py b/graph_of_thoughts/language_models/chatgpt.py
index 52da92a..4f63d61 100644
--- a/graph_of_thoughts/language_models/chatgpt.py
+++ b/graph_of_thoughts/language_models/chatgpt.py
@@ -7,11 +7,12 @@
# main author: Nils Blach
import backoff
-import openai
import os
import random
import time
from typing import List, Dict, Union
+from openai import OpenAI, OpenAIError
+from openai.types.chat.chat_completion import ChatCompletion
from .abstract_language_model import AbstractLanguageModel
@@ -53,15 +54,15 @@ def __init__(
self.organization: str = self.config["organization"]
if self.organization == "":
self.logger.warning("OPENAI_ORGANIZATION is not set")
- else:
- openai.organization = self.organization
- # The api key is the api key that is used for chatgpt. Env variable OPENAI_API_KEY takes precedence over config.
self.api_key: str = os.getenv("OPENAI_API_KEY", self.config["api_key"])
if self.api_key == "":
raise ValueError("OPENAI_API_KEY is not set")
- openai.api_key = self.api_key
+ # Initialize the OpenAI Client
+ self.client = OpenAI(api_key=self.api_key, organization=self.organization)
- def query(self, query: str, num_responses: int = 1) -> Dict:
+ def query(
+ self, query: str, num_responses: int = 1
+ ) -> Union[List[ChatCompletion], ChatCompletion]:
"""
Query the OpenAI model for responses.
@@ -100,10 +101,8 @@ def query(self, query: str, num_responses: int = 1) -> Dict:
self.respone_cache[query] = response
return response
- @backoff.on_exception(
- backoff.expo, openai.error.OpenAIError, max_time=10, max_tries=6
- )
- def chat(self, messages: List[Dict], num_responses: int = 1) -> Dict:
+ @backoff.on_exception(backoff.expo, OpenAIError, max_time=10, max_tries=6)
+ def chat(self, messages: List[Dict], num_responses: int = 1) -> ChatCompletion:
"""
Send chat messages to the OpenAI model and retrieves the model's response.
Implements backoff on OpenAI error.
@@ -113,9 +112,9 @@ def chat(self, messages: List[Dict], num_responses: int = 1) -> Dict:
:param num_responses: Number of desired responses, default is 1.
:type num_responses: int
:return: The OpenAI model's response.
- :rtype: Dict
+ :rtype: ChatCompletion
"""
- response = openai.ChatCompletion.create(
+ response = self.client.chat.completions.create(
model=self.model_id,
messages=messages,
temperature=self.temperature,
@@ -124,8 +123,8 @@ def chat(self, messages: List[Dict], num_responses: int = 1) -> Dict:
stop=self.stop,
)
- self.prompt_tokens += response["usage"]["prompt_tokens"]
- self.completion_tokens += response["usage"]["completion_tokens"]
+ self.prompt_tokens += response.usage.prompt_tokens
+ self.completion_tokens += response.usage.completion_tokens
prompt_tokens_k = float(self.prompt_tokens) / 1000.0
completion_tokens_k = float(self.completion_tokens) / 1000.0
self.cost = (
@@ -138,19 +137,21 @@ def chat(self, messages: List[Dict], num_responses: int = 1) -> Dict:
)
return response
- def get_response_texts(self, query_response: Union[List[Dict], Dict]) -> List[str]:
+ def get_response_texts(
+ self, query_response: Union[List[ChatCompletion], ChatCompletion]
+ ) -> List[str]:
"""
Extract the response texts from the query response.
:param query_response: The response dictionary (or list of dictionaries) from the OpenAI model.
- :type query_response: Union[List[Dict], Dict]
+ :type query_response: Union[List[ChatCompletion], ChatCompletion]
:return: List of response strings.
:rtype: List[str]
"""
- if isinstance(query_response, Dict):
+ if not isinstance(query_response, List):
query_response = [query_response]
return [
- choice["message"]["content"]
+ choice.message.content
for response in query_response
- for choice in response["choices"]
+ for choice in response.choices
]
diff --git a/pyproject.toml b/pyproject.toml
index e41f145..ecbf97c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
[project]
name = "graph_of_thoughts"
-version = "0.0.2"
+version = "0.0.3"
authors = [
{ name="Maciej Besta", email="[email protected]" },
{ name="Nils Blach", email="[email protected]" },
@@ -20,17 +20,17 @@ classifiers = [
"Operating System :: OS Independent",
]
dependencies = [
- "backoff>=2.2.1",
- "openai>=0.27.7",
- "matplotlib>=3.7.1",
- "numpy>=1.24.3",
- "pandas>=2.0.3",
- "sympy>=1.12",
- "torch>=2.0.1",
- "transformers>=4.31.0",
- "accelerate>=0.21.0",
- "bitsandbytes>=0.41.0",
- "scipy>=1.10.1",
+ "backoff>=2.2.1,<3.0.0",
+ "openai>=1.0.0,<2.0.0",
+ "matplotlib>=3.7.1,<4.0.0",
+ "numpy>=1.24.3,<2.0.0",
+ "pandas>=2.0.3,<3.0.0",
+ "sympy>=1.12,<2.0",
+ "torch>=2.0.1,<3.0.0",
+ "transformers>=4.31.0,<5.0.0",
+ "accelerate>=0.21.0,<1.0.0",
+ "bitsandbytes>=0.41.0,<1.0.0",
+ "scipy>=1.10.1,<2.0.0",
]
[project.urls]
| Controller README
Dear authors,
I don't quite understand the **Controller Instantiation** part in **graph_of_thoughts/controller/README.md**.
How to instantiate Prompter, Parser, GraphOfOperations, and AbstractLanguageModel respectively? Do I need to paste the code block into a Python file and run the Python file? Could you please further explain it?
I didn't complete this part and ran `from examples.sorting.sorting_032 import SortingPrompter, SortingParser, utils`. As a result, I got an error
```
File "/home/weijie/graph-of-thoughts/graph_of_thoughts/language_models/chatgpt.py", line 104, in ChatGPT
backoff.expo, openai.error.OpenAIError, max_time=10, max_tries=6
^^^^^^^^^^^^
AttributeError: module 'openai' has no attribute 'error'
```
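For context, `openai.error` was removed in `openai` 1.0, which is what this traceback points at; the accompanying patch migrates to the client-based interface, roughly (api_key, organization, model_id and messages are placeholders here):

```python
from openai import OpenAI

# openai>=1.0 style: a client object instead of module-level configuration.
client = OpenAI(api_key=api_key, organization=organization)
response = client.chat.completions.create(model=model_id, messages=messages)
text = response.choices[0].message.content
```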
Best regards,
Weijie Liu
| 2023-11-20T13:41:37 | 0.0 | [] | [] |
|||
ipeaGIT/geobr | ipeaGIT__geobr-316 | ea4b7b964682854c32e0f64dc71fb66a403ce579 | diff --git a/python-package/geobr/utils.py b/python-package/geobr/utils.py
index 707a940b..95c05a74 100644
--- a/python-package/geobr/utils.py
+++ b/python-package/geobr/utils.py
@@ -25,10 +25,12 @@ def url_solver(url):
for url in urls:
- response = requests.get(url)
-
- if response.status_code == 200:
- return response
+ try:
+ response = requests.get(url)
+ if response.status_code == 200:
+ return response
+ except:
+ continue
raise ConnectionError(
"No mirrors are active. Please report to https://github.com/ipeaGIT/geobr/issues"
| Max Retries Exceeded Error
It seems like I do not have permission to connect to the IPEA server to access the data.
I'm using the Python package.
Getting the following error:
ConnectionError: HTTPConnectionPool(host='www.ipea.gov.br', port=80): Max retries exceeded with url: /geobr/metadata/metadata_1.7.0_gpkg.csv (Caused by NewConnectionError('<urllib3.connection.HTTPConnection object at 0x7f79604a1f70>: Failed to establish a new connection: [Errno 8] nodename nor servname provided, or not known'))
Any explanation for why I would be getting this error and how I can access? Thanks!
| Hi @bennettcapozzi . Thanks for opening this issue. This problem occurred because our servers were offline yesterday for planned maintenance. However, this ERROR should not have happened because **geobr** should have redirected to link to our data stored on GitHub. Perhaps there was an issue in the redirection @JoaoCarabetta ? | 2023-05-20T16:55:45 | 0.0 | [] | [] |
||
Apkawa/xlsx2html | Apkawa__xlsx2html-44 | 5b04c3373100ea22b8e3c339f4ec55f3029957af | diff --git a/xlsx2html/core.py b/xlsx2html/core.py
index a80626b..15faf30 100644
--- a/xlsx2html/core.py
+++ b/xlsx2html/core.py
@@ -171,6 +171,11 @@ def worksheet_to_data(ws, locale=None, fs=None, default_cell_border="none"):
)
for cell_range in merged_cell_ranges:
+ if ":" not in str(cell_range):
+ cell_range_list = list(ws[f"{cell_range}:{cell_range}"])
+ else:
+ cell_range_list = list(ws[cell_range])
+
cell_range_list = list(ws[cell_range])
m_cell = cell_range_list[0][0]
| Cell object is not iterable
In file xlsx2html/xlsx2html/core.py, line 171:
```py
for cell_range in merged_cell_ranges:
cell_range_list = list(ws[cell_range])
m_cell = cell_range_list[0][0]
```
If I use openpyxl to merge only one row of data,
`merged_cell_range` will look like this:
`['A2:A4','A5:A7','B7']`
then `list(ws[cell_range])` will throw
`Cell object is not iterable`
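A minimal sketch of a guard for single-cell entries (this mirrors the approach taken in the patch above):

```python
for cell_range in merged_cell_ranges:
    # A single merged cell like "B7" is not iterable as a range,
    # so expand it to "B7:B7" before slicing the worksheet.
    if ":" not in str(cell_range):
        cell_range_list = list(ws[f"{cell_range}:{cell_range}"])
    else:
        cell_range_list = list(ws[cell_range])
    m_cell = cell_range_list[0][0]
```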
| I need an example file. I don't know what the bug with merged cells is. | 2024-02-09T07:01:48 | 0.0 | [] | []
||
frostming/marko | frostming__marko-179 | bdb23cd4dc8bee13c8d048bcfc6feb2a00db0005 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 47b6ff8..2997e4e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,7 @@
+## Unreleased
+
+- Add pretty representation for the AST for debugging purpose. An extra group `repr` is added for more readable output.
+
## v2.0.2(2023-11-16)
### Fixed
diff --git a/marko/element.py b/marko/element.py
index 0fc7af2..2e10488 100644
--- a/marko/element.py
+++ b/marko/element.py
@@ -1,3 +1,5 @@
+from typing import Any
+
from .helpers import camel_to_snake_case
@@ -8,6 +10,7 @@ class Element:
"""
override: bool
+ children: Any
@classmethod
def get_type(cls, snake_case: bool = False) -> str:
@@ -23,3 +26,13 @@ def get_type(cls, snake_case: bool = False) -> str:
else:
name = cls.__name__
return camel_to_snake_case(name) if snake_case else name
+
+ def __repr__(self) -> str:
+ try:
+ from objprint import objstr
+ except ImportError:
+ from pprint import pformat
+
+ return f"<{self.__class__.__name__} children={pformat(self.children)}>"
+ else:
+ return objstr(self, honor_existing=False, include=["children"])
diff --git a/pdm.lock b/pdm.lock
index 486df20..3201cc3 100644
--- a/pdm.lock
+++ b/pdm.lock
@@ -2,10 +2,10 @@
# It is not intended for manual editing.
[metadata]
-groups = ["default", "benchmark", "codehilite", "dev", "doc", "toc"]
+groups = ["default", "benchmark", "codehilite", "dev", "doc", "toc", "repr"]
strategy = ["cross_platform"]
lock_version = "4.4"
-content_hash = "sha256:d0ab93b433c852f9bc6ad14877c37e260ffe2a735cc8790951cf2c8a4260d712"
+content_hash = "sha256:3dc6244761616c0a38f3c3c5356adb07b85cb016d0fd12fced271510a359cd2e"
[[package]]
name = "alabaster"
@@ -498,6 +498,16 @@ files = [
{file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"},
]
+[[package]]
+name = "objprint"
+version = "0.2.3"
+requires_python = ">=3.6"
+summary = "A library that can print Python objects in human readable format"
+files = [
+ {file = "objprint-0.2.3-py3-none-any.whl", hash = "sha256:1721e6f97bae5c5b86c2716a0d45a9dd2c9a4cd9f52cfc8a0dfbe801805554cb"},
+ {file = "objprint-0.2.3.tar.gz", hash = "sha256:73d0ad5a7c3151fce634c8892e5c2a050ccae3b1a353bf1316f08b7854da863b"},
+]
+
[[package]]
name = "packaging"
version = "21.3"
diff --git a/pyproject.toml b/pyproject.toml
index b061e0f..72ba2d1 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -35,6 +35,7 @@ Documentation = "https://marko-py.readthedocs.io"
[project.optional-dependencies]
toc = ["python-slugify"]
codehilite = ["pygments"]
+repr = ["objprint"]
[project.scripts]
marko = "marko.cli:main"
| __str__, __repr__ for classes
I'd like to inspect a `Document`. Is there a helper method to do that? Perhaps making all classes `@documentclass`es would help.
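Until such a helper exists, a rough way to inspect a parsed tree is to pretty-print its `children` with `objprint`, which is also what the patch above builds `__repr__` on (the `Markdown().parse()` call is assumed from marko's API):

```python
from marko import Markdown
from objprint import objstr

# Parse a small document and print its AST, showing only the `children` attributes.
doc = Markdown().parse("# Title\n\nSome *text*\n")
print(objstr(doc, honor_existing=False, include=["children"]))
```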
| [objprint](https://pypi.org/project/objprint) works quite well for this purpose, while at the same time I am seeking for a lightweight package to enable this. | 2023-11-23T00:33:55 | 0.0 | [] | [] |
||
vimt/MaxMind-DB-Writer-python | vimt__MaxMind-DB-Writer-python-15 | 6d836697142f4bb98b4a13073aa7e638bc886197 | diff --git a/README.md b/README.md
index d652ead..5089cb2 100644
--- a/README.md
+++ b/README.md
@@ -10,14 +10,16 @@
# MaxMind-DB-Writer-python
-Make `mmdb` format ip library file which can be read by [`maxmind` official language reader](https://dev.maxmind.com/geoip/geoip2/downloadable/)
+Make `mmdb` format ip library file which can be read by [
+`maxmind` official language reader](https://dev.maxmind.com/geoip/geoip2/downloadable/)
-~~[The official perl writer](https://github.com/maxmind/MaxMind-DB-Writer-perl) was written in perl,
-which was difficult to customize.
+~~[The official perl writer](https://github.com/maxmind/MaxMind-DB-Writer-perl) was written in perl,
+which was difficult to customize.
So I implemented the `MaxmindDB format` ip library in python language.~~
-MaxMind has now released an official Go version of the MMDB writer.
-If you prefer using Go, you can check out the official Go implementation [mmdbwriter](https://github.com/maxmind/mmdbwriter).
+MaxMind has now released an official Go version of the MMDB writer.
+If you prefer using Go, you can check out the official Go
+implementation [mmdbwriter](https://github.com/maxmind/mmdbwriter).
This project still provides a Python alternative for those who need it.
## Install
@@ -27,30 +29,35 @@ pip install -U mmdb_writer
```
## Usage
+
```python
from netaddr import IPSet
from mmdb_writer import MMDBWriter
+
writer = MMDBWriter()
writer.insert_network(IPSet(['1.1.0.0/24', '1.1.1.0/24']), {'country': 'COUNTRY', 'isp': 'ISP'})
writer.to_db_file('test.mmdb')
import maxminddb
+
m = maxminddb.open_database('test.mmdb')
r = m.get('1.1.1.1')
assert r == {'country': 'COUNTRY', 'isp': 'ISP'}
```
## Examples
+
see [csv_to_mmdb.py](./examples/csv_to_mmdb.py)
Here is a professional and clear translation of the README.md section from Chinese into English:
## Using the Java Client
-### TLDR
+If you are using the Java client, you need to be careful to set the `int_type` parameter so that Java correctly
+recognizes the integer type in the MMDB file.
-When generating an MMDB file for use with the Java client, you must specify the `int_type`:
+Example:
```python
from mmdb_writer import MMDBWriter
@@ -65,15 +72,15 @@ Alternatively, you can explicitly specify data types using the [Type Enforcement
In Java, when deserializing to a structure, the numeric types will use the original MMDB numeric types. The specific
conversion relationships are as follows:
-| mmdb type | java type |
-|--------------|------------|
-| float (15) | Float |
-| double (3) | Double |
-| int32 (8) | Integer |
-| uint16 (5) | Integer |
-| uint32 (6) | Long |
-| uint64 (9) | BigInteger |
-| uint128 (10) | BigInteger |
+| mmdb type | java type |
+|-----------|------------|
+| float | Float |
+| double | Double |
+| int32 | Integer |
+| uint16 | Integer |
+| uint32 | Long |
+| uint64 | BigInteger |
+| uint128 | BigInteger |
When using the Python writer to generate an MMDB file, by default, it converts integers to the corresponding MMDB type
based on the size of the `int`. For instance, `int(1)` would convert to `uint16`, and `int(2**16+1)` would convert
@@ -97,7 +104,17 @@ MMDB file. The behaviors for different `int_type` settings are:
| u64 | Stores all integer types as `uint64`. |
| u128 | Stores all integer types as `uint128`. |
+If you want to use different int types for different scenarios, you can use type wrapping:
+
+```python
+from mmdb_writer import MMDBWriter, MmdbI32, MmdbF32
+
+writer = MMDBWriter()
+# the value of field "i32" will be stored as int32 type
+writer.insert_network(IPSet(["1.0.0.0/24"]), {"i32": MmdbI32(128), "f32": MmdbF32(1.22)})
+```
+
+## Reference:
-## Reference:
- [MaxmindDB format](http://maxmind.github.io/MaxMind-DB/)
- [geoip-mmdb](https://github.com/i-rinat/geoip-mmdb)
diff --git a/mmdb_writer.py b/mmdb_writer.py
index e26de1f..7b984a6 100644
--- a/mmdb_writer.py
+++ b/mmdb_writer.py
@@ -378,11 +378,12 @@ def encode_meta(self, meta):
res += self.encode(v, meta_type.get(k))
return res
- def encode(self, value, type_id=None):
+ def encode(self, value, type_id=None, return_offset=False):
if self.cache:
cache_key = self._freeze(value)
try:
- return self.data_cache[cache_key]
+ offset = self.data_cache[cache_key]
+ return offset if return_offset else self._encode_pointer(offset)
except KeyError:
pass
@@ -399,18 +400,11 @@ def encode(self, value, type_id=None):
res = encoder(value)
if self.cache:
- # add to cache
- if type_id == 1:
- self.data_list.append(res)
- self.data_pointer += len(res)
- return res
- else:
- self.data_list.append(res)
- pointer_position = self.data_pointer
- self.data_pointer += len(res)
- pointer = self.encode(pointer_position, 1)
- self.data_cache[cache_key] = pointer
- return pointer
+ self.data_list.append(res)
+ offset = self.data_pointer
+ self.data_pointer += len(res)
+ self.data_cache[cache_key] = offset
+ return offset if return_offset else self._encode_pointer(offset)
return res
@@ -484,8 +478,8 @@ def _enumerate_nodes(self, node):
elif type(node) is SearchTreeLeaf:
node_id = id(node)
if node_id not in self._leaf_offset:
- res = self.encoder.encode(node.value)
- self._leaf_offset[node_id] = self._data_pointer - len(res)
+ offset = self.encoder.encode(node.value, return_offset=True)
+ self._leaf_offset[node_id] = offset + 16
else: # == None
return
| caching issues using dictionary
After running version 0.2.4 on a large data set, I have observed that caching a large number of key-value pairs sometimes causes cache errors and corrupts some of the cache entries. These occurrences are totally random and happen only when the cache size reaches a significant size. I was not able to pinpoint the exact cause.
| Upon further debugging, I found something very strange in the Encoder class. When this object's cache gets sufficiently large (about 69145 entries), I get some corrupted values. Example:
> ipdb\> self.encode(1153669, 1)
b'((\x0c'
ipdb> self._encode_pointer(1153669)
b'0\t\x92\x85'
The pointer value 1153669 encodes to two very different byte strings. Incidentally, `_encode_pointer` returns the correct value. Pointers 1153668-1153671 will always encode to the incorrect value for this Encoder object. With a different Encoder object, the error happens on another range of pointers. Example of a correctly encoded pointer:
> ipdb\> self.encode(1153675,1)
b'0\t\x92\x8b'
ipdb> self._encode_pointer(1153675)
b'0\t\x92\x8b'
After confirming this behavior, I narrowed it down to this:
> type_id = self.python_type_id(value)
I suspect the `type_decoder` mapping may have some kind of caching of these functions and calls the incorrect function when the object grows to a significant size (just speculating). Anyhow, since I know the root cause, I patched the code to call `_encode_pointer(value)` directly instead of going through the function mapping. The random corruption of prefixes/data is completely gone.
```diff
self.data_list.append(res)
pointer_position = self.data_pointer
self.data_pointer += len(res)
- pointer = self.encode(pointer_position, 1)
+ pointer = self._encode_pointer(pointer_position)
self.data_cache[cache_key] = pointer
return pointer
return res
```
Do you foresee any issue with my change? I am not sure if this is the correct way to fix this issue.
| 2024-10-04T15:23:59 | 0.0 | [] | [] |
||
silx-kit/jupyterlab-h5web | silx-kit__jupyterlab-h5web-117 | ca7ba196ff03b41444e46df4c43375e45965916c | diff --git a/example.ipynb b/example.ipynb
index 77c68ce..cbf18c9 100644
--- a/example.ipynb
+++ b/example.ipynb
@@ -33,7 +33,8 @@
" h5file['threeD'] = [np.sin(2*np.pi*f*np.sqrt(Xg**2 + Yg**2)) for f in np.arange(0.1, 1.1, 0.1)]\n",
" h5file['twoD'] = np.sin(np.sqrt(Xg**2 + Yg**2))\n",
" h5file.create_dataset('oneD', data=X, dtype='>f4')\n",
- " h5file['scalar'] = 42"
+ " h5file['scalar'] = 42 \n",
+ " h5file['complex'] = X + 2j * Y"
]
},
{
diff --git a/jupyterlab_h5web/handlers.py b/jupyterlab_h5web/handlers.py
index 1f6fdec..8c9f903 100644
--- a/jupyterlab_h5web/handlers.py
+++ b/jupyterlab_h5web/handlers.py
@@ -11,6 +11,7 @@
ResolvedEntityContent,
DatasetContent,
)
+from h5grove.utils import parse_bool_arg
from .utils import as_absolute_path, create_error
@@ -63,9 +64,12 @@ class DataHandler(ContentHandler):
def parse_content(self, content):
selection = self.get_query_argument("selection", None)
dtype = self.get_query_argument("dtype", None)
+ flatten = parse_bool_arg(
+ self.get_query_argument("flatten", None), fallback=False
+ )
assert isinstance(content, DatasetContent)
- return content.data(selection, dtype=dtype)
+ return content.data(selection, flatten, dtype)
class MetadataHandler(ContentHandler):
| "Expected complex" error
### Describe the bug
I am creating a dummy h5 file containing a complex array and an integer array as below, but I get an error message saying "Expected complex" (for the complex array) and "Expected Number" (for the integer array). The same does not happen for floats; it opens normally using dtype=np.float64.
```python
import numpy as np
import h5py
file = h5py.File('test.h5','a')
name = '1'
data = np.ones((5,5),dtype=np.complex64)
file.create_dataset(name,data=data)
```
### Screenshots
<img width="985" alt="Screenshot 2023-05-16 at 11 23 55" src="https://github.com/silx-kit/jupyterlab-h5web/assets/16940168/6480b20c-5586-4ff0-ac34-0514c84ef7a6">
### Context
- OS: Ubuntu 20.04.4 LTS
- Browser: Chrome Version 112.0.5615.49 (Official Build) (x86_64)
IPython : 7.29.0
ipykernel : 6.5.0
ipywidgets : 7.6.3
jupyter_client : 7.4.9
jupyter_core : 5.2.0
jupyter_server : 1.24.0
jupyterlab : 3.6.1
nbclient : 0.5.3
nbconvert : 7.2.9
nbformat : 5.7.3
notebook : 6.4.0
qtconsole : 5.1.1
traitlets : 5.9.0
<details><summary>Extension lists</summary>
<pre>
JupyterLab v3.6.1
/.../.local/share/jupyter/labextensions
@jupyter-widgets/jupyterlab-manager v3.1.0 enabled OK (python, jupyterlab_widgets)
/.../apps/modules/python/3.9.2/share/jupyter/labextensions
jupyterlab-plotly v5.1.0 enabled OK
jupyterlab-h5web v7.0.0 enabled OK (python, jupyterlab_h5web)
jupyter-matplotlib v0.11.2 enabled OK
config dir: /...//.local/etc/jupyter
jupyter_server_ydoc enabled
- Validating...
X is jupyter_server_ydoc importable?
config dir: /.../modules/python/3.9.2/etc/jupyter
jupyter_server_ydoc enabled
- Validating...
X is jupyter_server_ydoc importable?
jupyterlab enabled
- Validating...
jupyterlab 3.6.1 OK
jupyterlab_h5web enabled
- Validating...
jupyterlab_h5web 7.0.0 OK
</pre>
</details>
| It should be easy to fix: this is similar to #111, where the `dtype` query arg was not parsed. Except that it is the `flatten` arg in this case.
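For illustration, a minimal sketch of the kind of fix described here, based on the patch at the top of this record (the `parse_data_query_args` helper is hypothetical; it just wraps the same calls the patched `DataHandler.parse_content` makes):
```python
from h5grove.utils import parse_bool_arg

def parse_data_query_args(get_query_argument):
    """Sketch: build the (selection, flatten, dtype) arguments that
    DatasetContent.data() expects, instead of ignoring the extra query args."""
    selection = get_query_argument("selection", None)
    dtype = get_query_argument("dtype", None)
    flatten = parse_bool_arg(get_query_argument("flatten", None), fallback=False)
    return selection, flatten, dtype
```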
Thanks for reporting the problem ! | 2023-05-17T11:36:08 | 0.0 | [] | [] |
||
mrwan2546/EnkaNetwork.py | mrwan2546__EnkaNetwork.py-9 | 3ec5e8ecdc2cc67fc57278fb4450e2fdb8d12bca | diff --git a/enkanetwork/__init__.py b/enkanetwork/__init__.py
index 4fb1692..4a71832 100644
--- a/enkanetwork/__init__.py
+++ b/enkanetwork/__init__.py
@@ -1,11 +1,14 @@
+__title__ = 'enkanetwork.py'
+__author__ = 'M-307'
+__version__ = '1.2.10dev0'
+__license__ = 'MIT'
+__copyright__ = 'Copyright 2022-present M-307'
+
+
from .client import *
from .exception import *
from .model import *
from .utils import *
-from .info import *
from .cache import *
from .enum import *
-from .assets import *
-
-__VERSION__ = VERSION
-__AUTHOR__ = AUTHOR
+from .assets import *
\ No newline at end of file
diff --git a/enkanetwork/assets.py b/enkanetwork/assets.py
index 786af4b..ec34e4c 100644
--- a/enkanetwork/assets.py
+++ b/enkanetwork/assets.py
@@ -2,13 +2,11 @@
import os
import logging
-from typing import Dict, List, TextIO
-
from .enum import Language
from .model import assets
from .utils import create_ui_path
-from typing import Union
+from typing import Dict, List, TextIO, Optional, Union
PATH = os.path.dirname(os.path.abspath(__file__))
@@ -41,7 +39,7 @@ def COSTUMES_IDS(self) -> List[str]:
return [x for x in self.DATA["costumes"]]
@classmethod
- def character(cls, id: Union[int, str]) -> Union[assets.CharacterAsset, None]: # noqa: E501
+ def character(cls, id: Union[int, str]) -> Optional[assets.CharacterAsset]:
LOGGER.debug(f"Getting character assets with id: {id}")
data = cls.DATA["characters"].get(str(id))
@@ -57,7 +55,7 @@ def character(cls, id: Union[int, str]) -> Union[assets.CharacterAsset, None]:
})
@classmethod
- def character_costume(cls, id: int): # noqa: E501
+ def character_costume(cls, id: int) -> Optional[assets.CharacterCostume]:
LOGGER.debug(f"Getting costume assets with id: {id}")
data = cls.DATA["costumes"].get(str(id))
if not data:
@@ -70,7 +68,7 @@ def character_costume(cls, id: int): # noqa: E501
})
@classmethod
- def constellations(cls, id: int) -> Union[assets.CharacterConstellationsAsset, None]: # noqa: E501
+ def constellations(cls, id: int) -> Optional[assets.CharacterConstellationsAsset]:
LOGGER.debug(f"Getting character constellations assets with id: {id}")
data = cls.DATA["constellations"].get(str(id))
if not data:
@@ -84,7 +82,7 @@ def constellations(cls, id: int) -> Union[assets.CharacterConstellationsAsset, N
})
@classmethod
- def skills(cls, id: int) -> Union[assets.CharacterSkillAsset, None]:
+ def skills(cls, id: int) -> Optional[assets.CharacterSkillAsset]:
LOGGER.debug(f"Getting character skills assets with id: {id}")
data = cls.DATA["skills"].get(str(id))
@@ -99,7 +97,7 @@ def skills(cls, id: int) -> Union[assets.CharacterSkillAsset, None]:
})
@classmethod
- def namecards(cls, id: int) -> Union[assets.NamecardAsset, None]:
+ def namecards(cls, id: int) -> Optional[assets.NamecardAsset]:
LOGGER.debug(f"Getting namecards assets with id: {id}")
data = cls.DATA["namecards"].get(str(id))
if not data:
@@ -115,7 +113,7 @@ def namecards(cls, id: int) -> Union[assets.NamecardAsset, None]:
})
@classmethod
- def get_hash_map(cls, hash_id: str) -> Union[str, None]:
+ def get_hash_map(cls, hash_id: str) -> Optional[str]:
LOGGER.debug(f"Getting nameTextMapHash {hash_id} with language: {cls.LANGS}") # noqa: E501
for key in cls.HASH_MAP:
if str(hash_id) in cls.HASH_MAP[key]:
@@ -127,7 +125,7 @@ def get_hash_map(cls, hash_id: str) -> Union[str, None]:
return
@classmethod
- def character_icon(cls, id: int) -> Union[assets.CharacterIconAsset, None]:
+ def character_icon(cls, id: int) -> Optional[assets.CharacterIconAsset]:
data = cls.character(id)
if not data:
return
diff --git a/enkanetwork/client.py b/enkanetwork/client.py
index fe8e02f..e2167dc 100644
--- a/enkanetwork/client.py
+++ b/enkanetwork/client.py
@@ -73,7 +73,7 @@ async def fetch_user(self, uid: Union[str, int]) -> EnkaNetworkResponse:
self.LOGGER.debug(f"Validating with UID {uid}...")
if self._enable_cache:
- self.LOGGER.warn("Getting data from cache...")
+ self.LOGGER.warning("Getting data from cache...")
data = await self.cache.get(uid)
if data is not None:
diff --git a/enkanetwork/http.py b/enkanetwork/http.py
index c832796..198f4bb 100644
--- a/enkanetwork/http.py
+++ b/enkanetwork/http.py
@@ -36,14 +36,14 @@
class Route:
BASE_URL: ClassVar[str] = "https://enka.network{PATH}"
- RAW_DATA_URL = "https://raw.githubusercontent.com/mrwan200/enkanetwork.py-data/{PATH}"
+ RAW_DATA_URL: ClassVar[str] = "https://raw.githubusercontent.com/mrwan200/enkanetwork.py-data/{PATH}"
def __init__(
- self,
- method: str,
- path: str,
- endpoint: str = 'enka',
- uid: Optional[str] = None,
+ self,
+ method: str,
+ path: str,
+ endpoint: str = 'enka',
+ uid: Optional[str] = None,
) -> None:
self.method = method
self.uid = uid
@@ -85,7 +85,7 @@ async def request(self, route: Route, **kwargs: Any) -> Any:
kwargs['headers'] = {**utils.get_default_header(), **self.__headers}
response: Optional[aiohttp.ClientResponse] = None
- data: Optional[Union[Dict[str, Any]]] = None
+ data: Optional[Union[Dict[str, Any], str]] = None
if self.__session is MISSING:
self.__session = aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=self.__timeout))
diff --git a/enkanetwork/info.py b/enkanetwork/info.py
deleted file mode 100644
index 11111d7..0000000
--- a/enkanetwork/info.py
+++ /dev/null
@@ -1,2 +0,0 @@
-VERSION = "1.2.10dev0"
-AUTHOR = "M-307"
diff --git a/enkanetwork/utils.py b/enkanetwork/utils.py
index c79fc03..435a67b 100644
--- a/enkanetwork/utils.py
+++ b/enkanetwork/utils.py
@@ -7,7 +7,7 @@
from typing import Any, Dict, TYPE_CHECKING
-from .info import VERSION
+from . import __version__
if TYPE_CHECKING:
from aiohttp import ClientResponse
@@ -43,7 +43,7 @@ def get_default_header():
return {
"User-Agent": "EnkaNetwork.py/{version} (Python {major}.{minor}.{micro})".format( # noqa: E501
- version=VERSION,
+ version=__version__,
major=python_version.major,
minor=python_version.minor,
micro=python_version.micro
diff --git a/setup.py b/setup.py
index e5f76f4..3bd84ee 100644
--- a/setup.py
+++ b/setup.py
@@ -1,12 +1,12 @@
import setuptools
import re
-with open('enkanetwork/info.py') as f:
+with open('enkanetwork/__init__.py') as f:
"""
Get version from utils.py
Ref: https://github.com/Rapptz/discord.py/blob/52f3a3496bea13fefc08b38f9ed01641e565d0eb/setup.py#L9
"""
- version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(), re.M).group(1)
+ version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(), re.M).group(1)
setuptools.setup(
name="enkanetwork.py",
| Can't install Library via pip
| Fixed: https://github.com/mrwan200/EnkaNetwork.py/commit/25fe56c8f51935547abfc934f5fd46589352269d
Please install it again via pip:
`pip install enkanetwork.py` | 2022-08-13T06:41:02 | 0.0 | [] | [] |
||
ClementJ18/moddb | ClementJ18__moddb-26 | fce9fecfe29e5269317be674db12db0ad8804b23 | diff --git a/moddb/boxes.py b/moddb/boxes.py
index e7f8bf7..ea66338 100644
--- a/moddb/boxes.py
+++ b/moddb/boxes.py
@@ -1204,7 +1204,7 @@ class Mirror:
The index of the mirror, as multiple mirrors
have the same name. Index starts at 1
city : str
- Alpha 2 code for the city the server is located
+ Alpha 2 code, or full name, of the city the server is located
in
country : str
Alpha 2 code for the country the server is
diff --git a/moddb/pages/file.py b/moddb/pages/file.py
index 55c40ae..c6f36b3 100644
--- a/moddb/pages/file.py
+++ b/moddb/pages/file.py
@@ -169,7 +169,7 @@ def get_mirrors(self):
mirrors_div = html.find("div", class_="mirrors").find_all("div", recursive=False)
mirrors = []
for mirror in mirrors_div:
- mirror_match = re.match(r"(.*) #([0-9]*) \((.{2}), (.{2})\)", mirror.div.p.contents[-1].strip())
+ mirror_match = re.match(r"(.*) #([0-9]*) \((\w+), (.{2})\)", mirror.div.p.contents[-1].strip())
stats_match = re.match(
r"([0-9,]*) downloads? served, ([0-9.]*)% capacity",
mirror.div.span.string,
| File.get_mirrors regexp is too strict, fails to match in some examples
The regexp: `r"(.*) #([0-9]*) \((.{2}), (.{2})\)"` fails to match some mirror lines, e.g. "DBolical EU #3 (FRANKFURT, DE)"
The 'city' part is not always 2 characters long; sometimes it's longer (e.g. FRANKFURT in the example above).

This breaks `File.get_mirrors` which then crashes.
```
Traceback (most recent call last):
File "/home/karthanis/Documents/lutris/moddb_test/toto.py", line 7, in <module>
mirrors = f.get_mirrors()
File "/home/karthanis/Documents/lutris/moddb_test/venv/lib/python3.10/site-packages/moddb-0.8.0-py3.10.egg/moddb/pages/file.py", line 181, in get_mirrors
name=mirror_match.group(1),
AttributeError: 'NoneType' object has no attribute 'group'
```
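For illustration, a minimal sketch of a more permissive pattern in the spirit of this record's patch, which relaxes the city group from `.{2}` to `\w+`:
```python
import re

# City names can be longer than two characters, e.g. "FRANKFURT".
mirror_re = re.compile(r"(.*) #([0-9]*) \((\w+), (.{2})\)")

match = mirror_re.match("DBolical EU #3 (FRANKFURT, DE)")
print(match.groups())  # ('DBolical EU', '3', 'FRANKFURT', 'DE')
```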
| 2023-01-13T22:52:21 | 0.0 | [] | [] |
|||
raspishake/rsudp | raspishake__rsudp-47 | c623c71bada58806bfd6ea9b59448527a8e92cc3 | diff --git a/rsudp/raspberryshake.py b/rsudp/raspberryshake.py
index bdb5b3a..335346a 100644
--- a/rsudp/raspberryshake.py
+++ b/rsudp/raspberryshake.py
@@ -516,7 +516,7 @@ def get_inventory(sender='get_inventory'):
url = 'https://fdsnws.raspberryshakedata.com/fdsnws/station/1/query?network=%s&station=%s&level=resp&nodata=404&format=xml' % (
net, stn)#, str(UTCDateTime.now()-timedelta(seconds=14400)))
inv = read_inventory(url)
- region = FlinnEngdahl().get_region(inv[0][0].longitude, inv[0][0].latitude)
+ region = FlinnEngdahl().get_region(inv[0][-1].longitude, inv[0][-1].latitude)
printM('Inventory fetch successful. Station region is %s' % (region), sender)
except (IndexError, HTTPError):
printW('No inventory found for %s. Are you forwarding your Shake data?' % stn, sender)
| When station changes locations, rsudp uses first location in inventory for region lookup
Example:
```
<location 0>
<region: MAINE>
</location 0>
<location 1>
<region: NEW YORK>
</location 1>
```
rsudp region: MAINE
Solution: change `get_inventory()` to look for the last `[-1]` station location (instead of `[0]`) in the inventory to draw coordinates from.
Code location: https://github.com/raspishake/rsudp/blob/master/rsudp/raspberryshake.py#L519
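For illustration, a minimal sketch of the suggested change (mirroring this record's patch); `url` is assumed to be the FDSN station query already built inside `get_inventory()`:
```python
from obspy import read_inventory
from obspy.geodetics.flinnengdahl import FlinnEngdahl

inv = read_inventory(url)
# inv[0][-1] is the station's most recent location entry, so a station that
# moved reports the region of its current coordinates rather than the first.
region = FlinnEngdahl().get_region(inv[0][-1].longitude, inv[0][-1].latitude)
```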
| 2022-11-17T17:15:33 | 0.0 | [] | [] |
|||
zytedata/python-zyte-api | zytedata__python-zyte-api-13 | c64b495806a3d40e2c2cd42041b7bdaf311c10f4 | diff --git a/docs/command_line.rst b/docs/command_line.rst
index d95e333..12d9d2d 100644
--- a/docs/command_line.rst
+++ b/docs/command_line.rst
@@ -5,6 +5,7 @@ Command-line interface
======================
The most basic way to use the client is from a command line.
+
First, create a file with urls, an URL per line (e.g. ``urls.txt``).
Second, set ``ZYTE_API_KEY`` env variable with your
@@ -15,14 +16,15 @@ Then run a script, to get the results:
.. code-block:: shell
- python -m zyte_api urls.txt --output res.jsonl
+ zyte-api urls.txt --output res.jsonl
+
+.. note:: You may use ``python -m zyte_api`` instead of ``zyte-api``.
-.. note::
- The results can be stored in an order which is different from the input
- order. If you need to match the output results to the input URLs, the
- best way is to use the ``echoData`` field (see below); it is passed through,
- and returned as-is in the ``echoData`` attribute. By default it will
- contain the input URL the content belongs to.
+The results may be stored in an order which is different from the input order.
+If you need to match the output results to the input URLs, the best way is to
+use the ``echoData`` field (see below); it is passed through, and returned
+as-is in the ``echoData`` attribute. By default it will contain the input URL
+the content belongs to.
If you need more flexibility, you can customize the requests by creating
a JsonLines file with queries: a JSON object per line. You can pass any
@@ -44,7 +46,7 @@ To get results for this ``requests.jsonl`` file, run:
.. code-block:: shell
- python -m zyte_api requests.jsonl --output res.jsonl
+ zyte-api requests.jsonl --output res.jsonl
Processing speed
~~~~~~~~~~~~~~~~
@@ -61,7 +63,7 @@ To set these options in the CLI, use the ``--n-conn`` argument:
.. code-block:: shell
- python -m zyte_api urls.txt --n-conn 30 --output res.jsonl
+ zyte-api urls.txt --n-conn 30 --output res.jsonl
If too many requests are being processed in parallel, you'll be getting
throttling errors. They are handled by CLI automatically, but they make
@@ -84,7 +86,7 @@ input queries before sending them to the API:
.. code-block:: shell
- python -m zyte_api urls.txt --shuffle --output res.jsonl
+ zyte-api urls.txt --shuffle --output res.jsonl
-Run ``python -m zyte_api --help`` to get description of all supported
+Run ``zyte-api --help`` to get description of all supported
options.
diff --git a/setup.py b/setup.py
index 09e8ef6..d97d3eb 100755
--- a/setup.py
+++ b/setup.py
@@ -21,6 +21,9 @@ def get_version():
author_email='[email protected]',
url='https://github.com/zytedata/python-zyte-api',
packages=find_packages(exclude=['tests', 'examples']),
+ entry_points = {
+ 'console_scripts': ['zyte-api=zyte_api.__main__:_main'],
+ },
install_requires=[
'requests',
'tenacity',
diff --git a/zyte_api/__main__.py b/zyte_api/__main__.py
index f3ed880..b47b5f4 100644
--- a/zyte_api/__main__.py
+++ b/zyte_api/__main__.py
@@ -76,10 +76,10 @@ def read_input(input_fp, intype):
return records
-if __name__ == '__main__':
+def _main(program_name='zyte-api'):
""" Process urls from input file through Zyte Data API """
p = argparse.ArgumentParser(
- prog='python -m zyte_api',
+ prog=program_name,
description="""
Process input URLs from a file using Zyte Data API.
""",
@@ -141,3 +141,7 @@ def read_input(input_fp, intype):
api_key=args.api_key)
loop.run_until_complete(coro)
loop.close()
+
+
+if __name__ == '__main__':
+ _main(program_name='python -m zyte_api')
| Enable "zyte_api" instead of "python -m zyte_api"
| 2022-05-17T14:03:12 | 0.0 | [] | [] |
|||
DIRACGrid/WebAppDIRAC | DIRACGrid__WebAppDIRAC-444 | 0fda9e765c3febf5ac420a6899c6b4f5f837567f | diff --git a/WebApp/handler/SiteSummaryHandler.py b/WebApp/handler/SiteSummaryHandler.py
index fef0b263a..edf0dde85 100644
--- a/WebApp/handler/SiteSummaryHandler.py
+++ b/WebApp/handler/SiteSummaryHandler.py
@@ -11,7 +11,6 @@
from WebAppDIRAC.WebApp.handler.ResourceSummaryHandler import ResourceSummaryHandler
class SiteSummaryHandler(ResourceSummaryHandler):
-
AUTH_PROPS = "all"
ELEMENT_TYPE = 'Site'
@@ -251,7 +250,8 @@ def getPlotDict(self, siteName, grouping, reportName, typeName,
status=None):
plotDict = {'condDict': {
- 'Site': [siteName],
+ # DIRAC.AccountingSystem.Client.Types.DataOperation class use 'ExecutionSite' key instead 'Site'
+ 'ExecutionSite' if typeName == 'DataOperation' else 'Site': [siteName],
'grouping': [grouping]
},
'extraArgs': {
diff --git a/WebApp/static/DIRAC/SiteSummary/classes/SiteSummary.js b/WebApp/static/DIRAC/SiteSummary/classes/SiteSummary.js
index 5b33a07b3..66c9b61e7 100644
--- a/WebApp/static/DIRAC/SiteSummary/classes/SiteSummary.js
+++ b/WebApp/static/DIRAC/SiteSummary/classes/SiteSummary.js
@@ -402,15 +402,79 @@ Ext.define("DIRAC.SiteSummary.classes.SiteSummary", {
var values = {};
- values.name = GLOBAL.APP.CF.getFieldValueFromSelectedRow(me.grid, "Name");
- values.elementType = GLOBAL.APP.CF.getFieldValueFromSelectedRow(me.grid, "ElementType");
- values.statusType = GLOBAL.APP.CF.getFieldValueFromSelectedRow(me.grid, "StatusType");
-
+ if (me.grid.expandedGridPanel) {
+ if (!me.grid.expandedGridPanel.isExpanded) {
+ values.name = GLOBAL.APP.CF.getFieldValueFromSelectedRow(me.grid, "Name");
+ values.elementType = GLOBAL.APP.CF.getFieldValueFromSelectedRow(me.grid, "ElementType");
+ values.statusType = GLOBAL.APP.CF.getFieldValueFromSelectedRow(me.grid, "StatusType");
+ values.lastCheckTime = Ext.Date.format(GLOBAL.APP.CF.getFieldValueFromSelectedRow(me.grid, "LastCheckTime"), "Y-m-d H:i:s");
+ } else {
+ me.grid.expandedGridPanel.isExpanded = false;
+ values.name = GLOBAL.APP.CF.getFieldValueFromSelectedRow(me.grid.expandedGridPanel, "Name");
+ values.elementType = GLOBAL.APP.CF.getFieldValueFromSelectedRow(me.grid.expandedGridPanel, "ElementType");
+ values.statusType = GLOBAL.APP.CF.getFieldValueFromSelectedRow(me.grid.expandedGridPanel, "StatusType");
+ values.lastCheckTime = Ext.Date.format(GLOBAL.APP.CF.getFieldValueFromSelectedRow(me.grid.expandedGridPanel, "LastCheckTime"), "Y-m-d H:i:s");
+ }
+ } else {
+ values.name = GLOBAL.APP.CF.getFieldValueFromSelectedRow(me.grid, "Name");
+ values.elementType = GLOBAL.APP.CF.getFieldValueFromSelectedRow(me.grid, "ElementType");
+ values.statusType = GLOBAL.APP.CF.getFieldValueFromSelectedRow(me.grid, "StatusType");
+ values.lastCheckTime = Ext.Date.format(GLOBAL.APP.CF.getFieldValueFromSelectedRow(me.grid, "LastCheckTime"), "Y-m-d H:i:s");
+ }
return values;
},
__oprSetSite : function(action, newStatus) {
var me = this;
var selectedValues = me.__getSelectedValues();
+ me.getContainer().body.mask("Wait ...");
+ Ext.Ajax.request({
+ url: GLOBAL.BASE_URL + me.applicationName + "/action",
+ method: "POST",
+ params: {
+ action: Ext.JSON.encode([action]),
+ name: Ext.JSON.encode([selectedValues.name]),
+ elementType: Ext.JSON.encode([selectedValues.elementType]),
+ statusType: Ext.JSON.encode([selectedValues.statusType]),
+ status: Ext.JSON.encode([newStatus]),
+ lastCheckTime: Ext.JSON.encode([selectedValues.lastCheckTime])
+ },
+ scope: me,
+ failure: function(response) {
+ GLOBAL.APP.CF.showAjaxErrorMessage(response);
+ },
+ success: function(response) {
+ me.getContainer().body.unmask();
+ var jsonData = Ext.JSON.decode(response.responseText);
+
+ if (jsonData["success"] == "true") {
+ var rowid = null;
+ Ext.dirac.system_info.msg("info", jsonData["result"]);
+ var selectedRows = me.grid.getSelectionModel().getSelection();
+ // we assume that we only select one row...
+ me.grid.getStore().load();
+ me.grid.expandedGridPanel.destroy();
+ delete me.grid.expandedGridPanel;
+
+ Ext.defer(function() {
+ var records = me.grid.getStore().getRange();
+ var record = null;
+ for (var i = 0; i < records.length; i++) {
+ if (records[i].get("Name") == selectedRows[0].get("Name")) {
+ var record = me.grid.getView().getRecord(records[i]);
+ rowid = record.index;
+ me.grid.getSelectionModel().select(record);
+ break;
+ }
+ }
+
+ me.grid.getPlugin().toggleRow(rowid, record);
+ }, 400);
+ } else {
+ me.getContainer().body.unmask();
+ Ext.dirac.system_info.msg("error", jsonData["error"]);
+ }
+ }
+ });
},
__oprShowEditor : function() {
var me = this;
| SiteSummary page: banning/unbanning not working
Site Summary plots are not correct for transfers
In the Site Summary application, when selecting a site, 6 plots are produced: 4 on jobs and 2 on transfers... It seems the job plots do concern the selected site, while the transfer plots are for all channels... They should only refer to the SEs at the selected site...
|
In addition, the status of CEs and SEs only have 2 columns: Name and Type... and the interesting information is missing, i.e. AccessType and Status (Active/banned) | 2021-03-07T09:43:43 | 0.0 | [] | [] |
||
SCECcode/pycsep | SCECcode__pycsep-100 | c6f01f3532f700f5e36790e5d050690107daef72 | diff --git a/csep/core/catalogs.py b/csep/core/catalogs.py
index 2d8f6b3e..15031231 100644
--- a/csep/core/catalogs.py
+++ b/csep/core/catalogs.py
@@ -845,14 +845,33 @@ def plot(self, ax=None, show=False, extent=None, set_global=False, plot_args=Non
"""
# no mutable function arguments
+ plot_args_default = {
+ 'basemap': 'ESRI_terrain',
+ 'markersize': 2,
+ 'markercolor': 'red',
+ 'alpha': 0.3,
+ 'mag_scale': 7,
+ 'legend': True,
+ 'grid_labels': True,
+ 'legend_loc': 3,
+ 'figsize': (8, 8),
+ 'title': self.name,
+ 'mag_ticks': [4.0, 5.0, 6.0, 7.0]
+ }
+ # Plot the region border (if it exists) by default
+ try:
+ # This will throw error if catalog does not have region
+ _ = self.region.num_nodes
+ plot_args_default['region_border'] = True
+ except AttributeError:
+ pass
plot_args = plot_args or {}
- plot_args.setdefault('figsize', (10, 10))
- plot_args.setdefault('title', self.name)
+ plot_args_default.update(plot_args)
# this call requires internet connection and basemap
ax = plot_catalog(self, ax=ax,show=show, extent=extent,
- set_global=set_global, plot_args=plot_args)
+ set_global=set_global, plot_args=plot_args_default)
return ax
diff --git a/csep/core/forecasts.py b/csep/core/forecasts.py
index cb5a4c45..3741e202 100644
--- a/csep/core/forecasts.py
+++ b/csep/core/forecasts.py
@@ -673,10 +673,20 @@ def get_expected_rates(self, verbose=False, return_skipped=False):
else:
return self.expected_rates
- def plot(self, **kwargs):
+ def plot(self, plot_args = None, **kwargs):
+ plot_args = plot_args or {}
if self.expected_rates is None:
self.get_expected_rates()
- self.expected_rates.plot(**kwargs)
+ args_dict = {'title': self.name,
+ 'grid_labels': True,
+ 'grid': True,
+ 'borders': True,
+ 'feature_lw': 0.5,
+ 'basemap': 'ESRI_terrain',
+ }
+ args_dict.update(plot_args)
+ ax = self.expected_rates.plot(**kwargs, plot_args=args_dict)
+ return ax
def get_dataframe(self):
"""Return a single dataframe with all of the events from all of the catalogs."""
diff --git a/csep/core/regions.py b/csep/core/regions.py
index 719cbb70..86272cae 100644
--- a/csep/core/regions.py
+++ b/csep/core/regions.py
@@ -11,7 +11,7 @@
import pyproj
# PyCSEP imports
-from csep.utils.calc import bin1d_vec, cleaner_range
+from csep.utils.calc import bin1d_vec, cleaner_range, first_nonnan, last_nonnan
from csep.utils.scaling_relationships import WellsAndCoppersmith
def california_relm_collection_region(dh_scale=1, magnitudes=None, name="relm-california-collection"):
@@ -730,3 +730,26 @@ def _build_bitmask_vec(self):
return a, xs, ys
+ def tight_bbox(self):
+ # creates tight bounding box around the region, probably a faster way to do this.
+ ny, nx = self.idx_map.shape
+ asc = []
+ desc = []
+ for j in range(ny):
+ row = self.idx_map[j, :]
+ argmin = first_nonnan(row)
+ argmax = last_nonnan(row)
+ # points are stored clockwise
+ poly_min = self.polygons[int(row[argmin])].points
+ asc.insert(0, poly_min[0])
+ asc.insert(0, poly_min[1])
+ poly_max = self.polygons[int(row[argmax])].points
+ desc.append(poly_max[3])
+ desc.append(poly_max[2])
+ # close the loop
+ poly = np.array(asc + desc)
+ sorted_idx = np.sort(np.unique(poly, return_index=True, axis=0)[1], kind='stable')
+ unique_poly = poly[sorted_idx]
+ unique_poly = np.append(unique_poly, [unique_poly[0, :]], axis=0)
+ return unique_poly
+
diff --git a/csep/utils/calc.py b/csep/utils/calc.py
index 2bfc0ff1..b86987a1 100644
--- a/csep/utils/calc.py
+++ b/csep/utils/calc.py
@@ -213,4 +213,13 @@ def cleaner_range(start, end, h):
start = numpy.floor(const * start)
end = numpy.floor(const * end)
d = const * h
- return numpy.arange(start, end + d / 2, d) / const
\ No newline at end of file
+ return numpy.arange(start, end + d / 2, d) / const
+
+def first_nonnan(arr, axis=0, invalid_val=-1):
+ mask = arr==arr
+ return numpy.where(mask.any(axis=axis), mask.argmax(axis=axis), invalid_val)
+
+def last_nonnan(arr, axis=0, invalid_val=-1):
+ mask = arr==arr
+ val = arr.shape[axis] - numpy.flip(mask, axis=axis).argmax(axis=axis) - 1
+ return numpy.where(mask.any(axis=axis), val, invalid_val)
\ No newline at end of file
diff --git a/csep/utils/plots.py b/csep/utils/plots.py
index 0d0e531a..566a19c8 100644
--- a/csep/utils/plots.py
+++ b/csep/utils/plots.py
@@ -652,11 +652,6 @@ def plot_catalog(catalog, ax=None, show=False, extent=None, set_global=False, pl
"""
# Get spatial information for plotting
- bbox = catalog.get_bbox()
- if extent is None:
- dh = (bbox[1] - bbox[0])/20.
- dv = (bbox[3] - bbox[2]) / 20.
- extent = [bbox[0] - dh, bbox[1]+dh, bbox[2] -dv, bbox[3] + dv]
# Retrieve plot arguments
plot_args = plot_args or {}
@@ -674,6 +669,7 @@ def plot_catalog(catalog, ax=None, show=False, extent=None, set_global=False, pl
legend_loc = plot_args.get('legend_loc', 1)
mag_ticks = plot_args.get('mag_ticks', False)
labelspacing = plot_args.get('labelspacing', 1)
+ region_border = plot_args.get('region_border', True)
# cartopy properties
projection = plot_args.get('projection', ccrs.PlateCarree(central_longitude=0.0))
grid = plot_args.get('grid', True)
@@ -685,8 +681,19 @@ def plot_catalog(catalog, ax=None, show=False, extent=None, set_global=False, pl
linecolor = plot_args.get('linecolor', 'black')
- # Instantiage GeoAxes object
+ bbox = catalog.get_bbox()
+ if plot_args['region_border']:
+ try:
+ bbox = catalog.region.get_bbox()
+ except AttributeError:
+ pass
+ if extent is None:
+ dh = (bbox[1] - bbox[0])/20.
+ dv = (bbox[3] - bbox[2]) / 20.
+ extent = [bbox[0] - dh, bbox[1]+dh, bbox[2] -dv, bbox[3] + dv]
+
+ # Instantiage GeoAxes object
if ax is None:
fig = pyplot.figure(figsize=figsize)
ax = fig.add_subplot(111, projection=projection)
@@ -727,6 +734,13 @@ def size_map(markersize, values, scale):
loc=legend_loc, title=r"Magnitudes",title_fontsize=16,
labelspacing=labelspacing, handletextpad=5, framealpha=False)
+ if region_border:
+ try:
+ pts = catalog.region.tight_bbox()
+ ax.plot(pts[:, 0], pts[:, 1], lw=1, color='black')
+ except AttributeError:
+ print("unable to get tight bbox")
+
# Gridline options
if grid:
gl = ax.gridlines(draw_labels=grid_labels, alpha=0.5)
@@ -801,6 +815,7 @@ def plot_spatial_dataset(gridded, region, ax=None, show=False, extent=None, set_
borders = plot_args.get('borders', False)
linewidth = plot_args.get('linewidth', True)
linecolor = plot_args.get('linecolor', 'black')
+ region_border = plot_args.get('region_border', True)
# color bar properties
cmap = plot_args.get('cmap', None)
clabel = plot_args.get('clabel', '')
@@ -857,6 +872,10 @@ def plot_spatial_dataset(gridded, region, ax=None, show=False, extent=None, set_
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
+ if region_border:
+ pts = region.tight_bbox()
+ ax.plot(pts[:,0], pts[:,1], lw=1, color='black')
+
# matplotlib figure options
ax.set_title(title, y=1.06)
if filename is not None:
diff --git a/docs/concepts/plots.rst b/docs/concepts/plots.rst
new file mode 100644
index 00000000..321b5e03
--- /dev/null
+++ b/docs/concepts/plots.rst
@@ -0,0 +1,27 @@
+.. _plots-reference:
+
+#####
+Plots
+#####
+
+PyCSEP provides several functions to produce commonly used plots, such as an earthquake forecast or the evaluation catalog
+or perhaps a combination of the two.
+
+.. contents:: Table of Contents
+ :local:
+ :depth: 2
+
+************
+Introduction
+************
+
+
+
+**************
+Plot arguments
+**************
+
+***************
+Available plots
+***************
+
diff --git a/examples/tutorials/catalog_forecast_evaluation.py b/examples/tutorials/catalog_forecast_evaluation.py
index ae9578af..6fcee589 100644
--- a/examples/tutorials/catalog_forecast_evaluation.py
+++ b/examples/tutorials/catalog_forecast_evaluation.py
@@ -33,8 +33,8 @@
# Forecasts should define a time horizon in which they are valid. The choice is flexible for catalog-based forecasts, because
# the catalogs can be filtered to accommodate multiple end-times. Conceptually, these should be separate forecasts.
-start_time = time_utils.strptime_to_utc_datetime("1992-06-28 11:57:34.14")
-end_time = time_utils.strptime_to_utc_datetime("1992-07-28 11:57:34.14")
+start_time = time_utils.strptime_to_utc_datetime("1992-06-28 11:57:35.0")
+end_time = time_utils.strptime_to_utc_datetime("1992-07-28 11:57:35.0")
####################################################################################################################################
# Define spatial and magnitude regions
@@ -72,7 +72,8 @@
forecast = csep.load_catalog_forecast(datasets.ucerf3_ascii_format_landers_fname,
start_time = start_time, end_time = end_time,
- region = space_magnitude_region)
+ region = space_magnitude_region,
+ apply_filters = True)
# Assign filters to forecast
forecast.filters = [f'origin_time >= {forecast.start_epoch}', f'origin_time < {forecast.end_epoch}']
@@ -92,7 +93,12 @@
comcat_catalog = csep.query_comcat(start_time, end_time, min_magnitude=forecast.min_magnitude)
# Filter observed catalog using the same region as the forecast
+
comcat_catalog = comcat_catalog.filter_spatial(forecast.region)
+print(comcat_catalog)
+
+# Plot the catalog
+comcat_catalog.plot()
####################################################################################################################################
# Perform number test
| evaluate catalog forecast example needs to be modified
`load_catalog_forecast(...)` requires the argument `apply_filters=True` to properly filter the catalogs. The example should reflect this and explain the behavior of this parameter.
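For illustration, a minimal sketch of the call as it appears in this record's patched tutorial (the `start_time`, `end_time` and `space_magnitude_region` variables are defined earlier in that tutorial):
```python
import csep
from csep.utils import datasets

# apply_filters=True is what makes the forecast apply its filters (here the
# origin_time bounds) to the catalogs, per the issue above.
forecast = csep.load_catalog_forecast(datasets.ucerf3_ascii_format_landers_fname,
                                      start_time=start_time, end_time=end_time,
                                      region=space_magnitude_region,
                                      apply_filters=True)
forecast.filters = [f'origin_time >= {forecast.start_epoch}',
                    f'origin_time < {forecast.end_epoch}']
```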
| 2021-03-23T23:10:34 | 0.0 | [] | [] |
|||
Sigm0oid/dynamodb-geo.py | Sigm0oid__dynamodb-geo.py-42 | 1910368b020cddfe8275f94d7962cc48ca0ec9bd | diff --git a/dynamodbgeo/DynamoDBManager.py b/dynamodbgeo/DynamoDBManager.py
index 1542696..c029609 100644
--- a/dynamodbgeo/DynamoDBManager.py
+++ b/dynamodbgeo/DynamoDBManager.py
@@ -22,7 +22,7 @@ def queryGeohash(self, queryInput, hashKey: int, range: int):
params['IndexName']=self.config.geohashIndexName
# As eyConditionExpressions must only contain one condition per key, customer passing KeyConditionExpression will be replaced automatically
- params['KeyConditionExpression']='hashKey = :hashKey and ' + str(self.config.geohashAttributeName) +' between :geohashMin and :geohashMax'
+ params['KeyConditionExpression']=str(self.config.hashKeyAttributeName) + ' = :hashKey and ' + str(self.config.geohashAttributeName) +' between :geohashMin and :geohashMax'
if 'ExpressionAttributeValues' in queryInput.keys():
params['ExpressionAttributeValues'].update(
| Rectangular Query missing key schema element: hashkey
Hi, I'm trying to make a rectangular query as such:
```
QueryRectangleInput={
"ExpressionAttributeNames": {
"#n": "name"
},
"FilterExpression": "#n = :val1",
"ExpressionAttributeValues": {
":val1": {"S": query},
}
}
results = geoDataManager.queryRectangle(
dynamodbgeo.QueryRectangleRequest(
dynamodbgeo.GeoPoint(swLat, swLon),
dynamodbgeo.GeoPoint(neLat, neLon),
QueryRectangleInput
)
)
```
But am getting an error regarding the hashkey:
```
An error occurred (ValidationException) when calling the Query operation: Query condition missed key schema element: hashkey
Traceback (most recent call last): File "/var/task/lambda_function.py", line 50, in lambda_handler
results = geoDataManager.queryRectangle( File "/opt/python/dynamodbgeo/GeoDataManager.py", line 49, in queryRectangle results = self.dispatchQueries(covering, QueryRectangleInput) File "/opt/python/dynamodbgeo/GeoDataManager.py", line 39, in dispatchQueries results.extend(self.dynamoDBManager.queryGeohash( File "/opt/python/dynamodbgeo/DynamoDBManager.py", line 37, in queryGeohash response = self.config.dynamoDBClient.query(**params) File "/opt/python/botocore/client.py", line 316, in _api_call return self._make_api_call(operation_name, kwargs) File "/opt/python/botocore/client.py", line 635, in _make_api_call raise error_class(parsed_response, operation_name)
[ERROR] ClientError: An error occurred (ValidationException) when calling the Query operation: Query condition missed key schema element: hashkey
```
I took a look at the code and I believe the reason it fails is that [here](https://github.com/Sigm0oid/dynamodb-geo.py/blob/1910368b020cddfe8275f94d7962cc48ca0ec9bd/dynamodbgeo/DynamoDBManager.py#L15), in the queryGeohash function of the DynamoDBManager, the attribute name 'hashKey' is hardcoded, while I configured it as `config.hashKeyAttributeName = 'hashkey'`, so the query throws because it looks for 'hashKey' instead of 'hashkey'.
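For illustration, a minimal sketch of the corrected key condition, mirroring this record's patch (the expression is built from the configured hash key attribute instead of the hardcoded 'hashKey'):
```python
# Inside DynamoDBManager.queryGeohash: respect the configured attribute name,
# so a table set up with hashKeyAttributeName='hashkey' no longer fails with
# "Query condition missed key schema element".
params['KeyConditionExpression'] = (
    str(self.config.hashKeyAttributeName)
    + ' = :hashKey and '
    + str(self.config.geohashAttributeName)
    + ' between :geohashMin and :geohashMax'
)
```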
| 2020-09-07T17:12:07 | 0.0 | [] | [] |
|||
qiskit-community/qiskit-machine-learning | qiskit-community__qiskit-machine-learning-838 | b5e1c521af69a3818f58204bee61298b22de0de3 | diff --git a/qiskit_machine_learning/circuit/library/raw_feature_vector.py b/qiskit_machine_learning/circuit/library/raw_feature_vector.py
index be9cedf7b..bdb0cd460 100644
--- a/qiskit_machine_learning/circuit/library/raw_feature_vector.py
+++ b/qiskit_machine_learning/circuit/library/raw_feature_vector.py
@@ -1,6 +1,6 @@
# This code is part of a Qiskit project.
#
-# (C) Copyright IBM 2020, 2023.
+# (C) Copyright IBM 2020, 2024.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
@@ -176,7 +176,8 @@ def _define(self):
raise QiskitError("Cannot define a ParameterizedInitialize with unbound parameters")
# normalize
- normalized = np.array(cleaned_params) / np.linalg.norm(cleaned_params)
+ norm = np.linalg.norm(cleaned_params)
+ normalized = cleaned_params if np.isclose(norm, 1) else cleaned_params / norm
circuit = QuantumCircuit(self.num_qubits)
circuit.initialize(normalized, range(self.num_qubits))
| Nightly CI seems to have been failing for a while in random jobs
Nightly CI seems to be failing, with random jobs showing the error below. From what I can see, the first failure occurred on 16th Aug, the day after the Qiskit 1.2.0 release.
```
File "D:\a\qiskit-machine-learning\qiskit-machine-learning\test\circuit\library\test_raw_feature_vector.py", line 63, in test_fully_bound
self.assertEqual(bound.decompose(), ref)
File "c:\hostedtoolcache\windows\python\3.8.10\x64\lib\unittest\case.py", line 912, in assertEqual
assertion_func(first, second, msg=msg)
File "c:\hostedtoolcache\windows\python\3.8.10\x64\lib\unittest\case.py", line 905, in _baseAssertEqual
raise self.failureException(msg)
AssertionError: <qiskit.circuit.quantumcircuit.QuantumCircuit object at 0x0000029754FC25E0> != <qiskit.circuit.quantumcircuit.QuantumCircuit object at 0x000002975636DBE0>
```
1.2.0 came with changes that moved more logic into Rust for performance, e.g. circuit data, and there were changes to Initialize and StateVector as well, which I think RawFeatureVector ends up using. But the failure seems random, in that it passes in most jobs.
| I've been noticing this for a while closer to the 1.2 release. Has this happened in other community repos too? I couldn't find this issue documented and reproduced in isolation in the main Qiskit - do you think it's worth looking into?
It is probably worth running the test case locally in a loop to see if the failure occurs and what happens in the case the comparison fails. I see the test is using randomly generated params so maybe it depends on the value that it creates.
The following reproduces the failure. The test is parameterizing the RawFeatureVector and an equivalently constructed circuit that should represent it with a random set of parameters and checking they are equal. It seems that some parameter values it fails - though if I print the circuits they seem to be the same - at least at the level of precision the values are printed but maybe there is some small difference sometimes that causes the failure. This code, based on what is in the test case, reproduces the failure. Note here I have fixed the random seed so it fails the first time through the loop (I found that by seeding with _i_ the loop count - having tried several I randomly picked where it did not fail). If you comment out the line seeding numpy then it may go through the loop a few times before it fails. I limited the amount of total tries so it would not go indefinitely but the most I saw in the few tries I did was a count in the 30's before it failed.
```
import numpy as np
import qiskit
from qiskit.circuit import QuantumCircuit
from qiskit_machine_learning.circuit.library import RawFeatureVector
failed = False
i = 0
while True:
i += 1
circuit = RawFeatureVector(8)
np.random.seed(seed=8) # Causes a failure first time, comment out for more random fails
params = np.random.random(8) + 1j * np.random.random(8)
params /= np.linalg.norm(params)
bound = circuit.assign_parameters(params)
ref = QuantumCircuit(3)
ref.initialize(params, ref.qubits)
tst = bound.decompose()
if tst != ref or i > 100:
failed = (tst != ref)
break
print(f"{'Failed' if failed else 'Passed'} tries {i}")
print(tst)
print(ref)
```
That seems to be a fluke in the parameter renormalization in the `RawFeatureVector`. I assume due to round-off errors, renormalizing the parameters slightly changes them and then leads to slightly different circuits, e.g. in the snippet you provided @woodsp-ibm, one of the `Rz` gates differs by a factor of 2pi.
The `Statevectors` of the circuits still compare to equal (as they should, the parameters differ only by roundoff errors) so I'm not 100% sure this is truly an error, since we do use slightly different workflows to obtain the circuit. That being said, we could fix it by only renormalizing the input parameters if they are not normalized, i.e. something like
```python
# in ParameterizedInitialize._define
norm = np.linalg.norm(cleaned_params)
normalized = cleaned_params if np.isclose(norm, 1) else cleaned_params / norm
```
I checked and that resolves the problem locally. | 2024-10-04T16:37:34 | 0.0 | [] | [] |
||
tud-zih-energy/voice-annotation-tool | tud-zih-energy__voice-annotation-tool-67 | 6d74c6f7f284f7882d347152db205a00a3c021df | diff --git a/src/voice_annotation_tool/opened_project_frame.py b/src/voice_annotation_tool/opened_project_frame.py
index 7be72c0..f81dc5b 100644
--- a/src/voice_annotation_tool/opened_project_frame.py
+++ b/src/voice_annotation_tool/opened_project_frame.py
@@ -138,9 +138,9 @@ def load_project(self, project: Project):
self.selection_changed
)
self.annotationEdit.clear()
+ self.update_metadata_widgets()
if len(project.annotations):
self.annotationList.setCurrentIndex(self.annotationList.model().index(0, 0))
- self.update_metadata_widgets()
def update_metadata_widgets(self):
"""Disables or enables the widgets used to edit the annotation
@@ -248,7 +248,10 @@ def selection_changed(self, selected, deselected):
self.annotationEdit.blockSignals(True)
self.annotationEdit.setText(annotation.sentence)
self.annotationEdit.blockSignals(False)
- self.audioPlaybackWidget.load_file(annotation.path)
+ if annotation.path.is_file():
+ self.audioPlaybackWidget.load_file(annotation.path)
+ for buttons in self.get_playback_buttons():
+ buttons.setEnabled(annotation.path.is_file())
@Slot()
def import_profile_pressed(self):
| Duplicate warning when audio file is missing
When selecting an annotation whose audio file doesn't exist, two errors are shown, one from GStreamer and one from the tool. It would be nicer if missing files were handled differently.
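For illustration, a minimal sketch of the guard this record's patch adds to `selection_changed` in `opened_project_frame.py`: only existing files are loaded and the playback buttons are disabled otherwise, so neither error appears.
```python
# Skip the GStreamer load for missing files and grey out playback controls
# instead of showing two error dialogs.
if annotation.path.is_file():
    self.audioPlaybackWidget.load_file(annotation.path)
for button in self.get_playback_buttons():
    button.setEnabled(annotation.path.is_file())
```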
| 2022-03-28T05:37:28 | 0.0 | [] | [] |
|||
appvia/tf2helm | appvia__tf2helm-6 | 75a89086a6e01ab2ed45d5bb5a66668e49483d5b | diff --git a/README.md b/README.md
index 1d46a3b..b42b07e 100644
--- a/README.md
+++ b/README.md
@@ -14,21 +14,21 @@ pip install tf2helm
## Usage
```
-$ tf2helm --help
+tf2helm
Usage: tf2helm [OPTIONS]
tf2helm converts a Terraform module to a Helm Chart [currently only supports
the Terraform Operator]
-
Options:
- --tf_module TEXT Path or URL to a Terraform module.
- --tf_module_url TEXT Specify this if tf_module does not point to a URL.
- --tf_module_version TEXT Terraform module version.
- --tf_version TEXT Terraform version.
- --name TEXT Helm chart name.
- --version TEXT Helm chart version.
- --app_version TEXT Helm chart application version.
- --output_dir TEXT Path to the Helm chart output directory.
- --help Show this message and exit.
+ --tf_module_path TEXT Terraform module local Path e.g.
+ "/local/path/to/module".
+ --tf_module_url TEXT Terraform module URL e.g.
+ "https://github.com/<org>/<module>?ref=<branch|tag>".
+ --tf_version TEXT Terraform version.
+ --name TEXT Helm chart name.
+ --version TEXT Helm chart version.
+ --app_version TEXT Helm chart application version.
+ --output_dir TEXT Path to the Helm chart output directory.
+ --help Show this message and exit
```
diff --git a/setup.py b/setup.py
index bf60ad1..4ab5437 100755
--- a/setup.py
+++ b/setup.py
@@ -8,7 +8,7 @@
setup(
name = 'tf2helm',
packages = ['tf2helm'],
- version = '0.0.4',
+ version = '0.0.5',
description = 'tf2helm converts a Terraform module to a Helm Chart [currently only supports the Terraform Operator]',
long_description = long_description,
long_description_content_type = 'text/markdown',
@@ -16,7 +16,7 @@
author_email = '[email protected]',
url = 'https://github.com/appvia/tf2helm',
py_modules = ['tf2helm', 'tfparser', 'filehandler'],
- install_requires = ['python-hcl2', 'avionix', 'requests', 'jinja2', 'halo', 'click'],
+ install_requires = ['python-hcl2', 'avionix', 'requests', 'jinja2', 'halo', 'click', 'gitpython'],
keywords = ['terraform', 'helm', 'kubernetes', 'self-service', 'cloud', 'aws', 'azure', 'gcp'],
classifiers = [
"Development Status :: 3 - Alpha",
diff --git a/tf2helm/filehandler.py b/tf2helm/filehandler.py
index ce90000..216765f 100755
--- a/tf2helm/filehandler.py
+++ b/tf2helm/filehandler.py
@@ -3,9 +3,8 @@
import os
import shutil
import requests
-import zipfile
from jinja2 import Environment, FileSystemLoader
-from io import BytesIO
+from git import Repo
def render_template(filename, config, tf_config, chart_filename):
@@ -35,21 +34,20 @@ def copy_file(source, destination):
shutil.copy2(source, destination)
-def download_tf_module(module, version, output_dir):
+def download_tf_module(module, output_dir):
"""
Assumes source code is stored Git.
- Downloads it as a zip file and unzips it in a specified directory.
+ Clones your Git repository into a specified directory.
Arguments:
module: terraform module URL
- version: terraform module version
output_dir: an absolute or relative path to where the terraform module will be stored
"""
- if module.endswith('/'):
- module.strip('/')
- if not version.startswith('v'):
- version = 'v' + version
- url = module + '/archive/refs/tags/' + version + '.zip'
- response = requests.get(module + '/archive/refs/tags/' + version + '.zip')
- archive = zipfile.ZipFile(BytesIO(response.content))
- archive.extractall(output_dir)
- return archive.namelist()[0]
+ repo_url = module.split('?')[0]
+ repo_name = repo_url.split('/')[-1].split('.')[0]
+ if len(module.split('?')) > 1:
+ ref = module.split('?')[1].split('=')[1]
+ repo = Repo.clone_from(repo_url, output_dir + '/' + repo_name)
+ repo.git.checkout(ref)
+ else:
+ Repo.clone_from(repo_url, output_dir + '/' + repo_name)
+ return repo_name
diff --git a/tf2helm/templates/tf_operator.yaml.j2 b/tf2helm/templates/tf_operator.yaml.j2
index 09cd999..b12c756 100644
--- a/tf2helm/templates/tf_operator.yaml.j2
+++ b/tf2helm/templates/tf_operator.yaml.j2
@@ -7,7 +7,7 @@ metadata:
spec:
terraformVersion: {{ tf_config['tf_version'] }}
terraformRunnerPullPolicy: IfNotPresent
- terraformModule: {{ tf_config['tf_module'] }}?ref={{ tf_config['tf_module_version'] }}
+ terraformModule: {{ tf_config['tf_module'] }}
ignoreDelete: false
credentials: {}
env:
diff --git a/tf2helm/tf2helm.py b/tf2helm/tf2helm.py
index 808617a..4094407 100755
--- a/tf2helm/tf2helm.py
+++ b/tf2helm/tf2helm.py
@@ -14,31 +14,30 @@
spinner = Halo(spinner='dots')
[email protected]()
[email protected]('--tf_module', help='Path or URL to a Terraform module.')
[email protected]('--tf_module_url', default=None, help='Specify this if tf_module does not point to a URL.')
[email protected]('--tf_module_version', help='Terraform module version.')
[email protected](no_args_is_help=True)
[email protected]('--tf_module_path', default=None, help='Terraform module local Path e.g. "/local/path/to/module".')
[email protected]('--tf_module_url', default=None, help='Terraform module URL e.g. "https://github.com/<org>/<module>?ref=<branch|tag>".')
@click.option('--tf_version', help='Terraform version.')
@click.option('--name', help='Helm chart name.')
@click.option('--version', help='Helm chart version.')
@click.option('--app_version', help='Helm chart application version.')
@click.option('--output_dir', help='Path to the Helm chart output directory.')
-def main(tf_module, tf_module_version, tf_version, name, version, app_version, output_dir, tf_module_url):
+def main(tf_module_path, tf_module_url, tf_version, name, version, app_version, output_dir):
"""tf2helm converts a Terraform module to a Helm Chart [currently only supports the Terraform Operator]"""
tf_config = {}
- tf_config['tf_module_version'] = tf_module_version
tf_config['tf_version'] = tf_version
try:
spinner.start('Download Terraform module')
time.sleep(1)
- if tf_module.startswith('https://'):
- tf_config['tf_module'] = tf_module
+ if tf_module_url:
+ tf_config['tf_module'] = tf_module_url
tf_module = filehandler.download_tf_module(
- tf_module, tf_module_version, '.modules')
+ tf_module_url, '.modules')
tf_module = '.modules/' + tf_module
- elif not tf_module.startswith('https://'):
- tf_config['tf_module'] = tf_module_url
+ elif tf_module_path:
+ tf_config['tf_module'] = None
+ tf_module = tf_module_path
spinner.succeed()
spinner.start('Translate Terraform module')
time.sleep(1)
| Rename or clarify the purpose of tf_module vs tf_module_url
On an initial glance when using the package, I interpreted `tf_module_url` as the arg to use when pointing to a URL where a Terraform module exists, and it would fetch that module. However this arg is only used to set that URL within the Custom Resource spec (`spec.terraformModule`).
I'd suggest perhaps renaming this arg and the comment, or otherwise making both arguments more explicit in their purpose (i.e. `tf_module_path` to reference a local path when generating a Helm chart which is more relevant for local validation).
CLI throws error when no flag is specified.
When executing the package via CLI, if no flag is specified it will throw the below error:
```
$ tf2helm
⠹ Download Terraform module
Traceback (most recent call last):
File "/Users/alastairhinde/Library/Python/3.8/bin/tf2helm", line 8, in <module>
sys.exit(main())
File "/Users/alastairhinde/Library/Python/3.8/lib/python/site-packages/click/core.py", line 1128, in __call__
⠸ Download Terraform module return self.main(*args, **kwargs)
File "/Users/alastairhinde/Library/Python/3.8/lib/python/site-packages/click/core.py", line 1053, in main
rv = self.invoke(ctx)
File "/Users/alastairhinde/Library/Python/3.8/lib/python/site-packages/click/core.py", line 1395, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/alastairhinde/Library/Python/3.8/lib/python/site-packages/click/core.py", line 754, in invoke
return __callback(*args, **kwargs)
File "/Users/alastairhinde/Library/Python/3.8/lib/python/site-packages/tf2helm/tf2helm.py", line 35, in main
if tf_module.startswith('https://'):
AttributeError: 'NoneType' object has no attribute 'startswith'
```
Could this show the available help options instead?
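For illustration, a minimal sketch of the click behaviour used in this record's patch, where `no_args_is_help=True` prints the help text instead of a traceback when no flags are given (the option list here is abbreviated):
```python
import click

@click.command(no_args_is_help=True)  # bare `tf2helm` prints the usage/help text
@click.option('--tf_module_path', default=None, help='Terraform module local path.')
@click.option('--tf_module_url', default=None, help='Terraform module URL.')
def main(tf_module_path, tf_module_url):
    click.echo(f"path={tf_module_path} url={tf_module_url}")

if __name__ == '__main__':
    main()
```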
| 2022-04-21T04:45:35 | 0.0 | [] | [] |
|||
German-BioImaging/omero-rdf | German-BioImaging__omero-rdf-6 | c85865d583129daa0ab1890231db5c19ce521610 | diff --git a/src/omero_rdf/__init__.py b/src/omero_rdf/__init__.py
index d6ee8f9..7854d22 100644
--- a/src/omero_rdf/__init__.py
+++ b/src/omero_rdf/__init__.py
@@ -28,8 +28,10 @@
from omero.cli import BaseControl, Parser, ProxyStringType
from omero.gateway import BlitzGateway, BlitzObjectWrapper
from omero.model import Dataset, Image, IObject, Plate, Project, Screen
+from omero.sys import ParametersI
from omero_marshal import get_encoder
-from rdflib import BNode, Literal, URIRef
+from rdflib import BNode, Graph, Literal, URIRef
+from rdflib.namespace import DCTERMS, RDF
HELP = """A plugin for exporting rdf from OMERO
@@ -48,6 +50,7 @@
Subj = Union[BNode, URIRef]
Obj = Union[BNode, Literal, URIRef]
Triple = Tuple[Subj, URIRef, Obj]
+Handlers = List[Callable[[URIRef, URIRef, Data], Generator[Triple, None, bool]]]
def gateway_required(func: Callable) -> Callable: # type: ignore
@@ -87,25 +90,51 @@ class Handler:
OME = "http://www.openmicroscopy.org/rdf/2016-06/ome_core/"
OMERO = "http://www.openmicroscopy.org/TBD/omero/"
- def __init__(self, gateway: BlitzGateway) -> None:
+ def __init__(
+ self,
+ gateway: BlitzGateway,
+ pretty_print=False,
+ trim_whitespace=False,
+ use_ellide=False,
+ ) -> None:
self.gateway = gateway
self.cache: Set[URIRef] = set()
self.bnode = 0
- self.annotation_handlers: List[
- Callable[[URIRef, URIRef, Data], Generator[Triple, None, bool]]
- ] = []
+ self.pretty_print = pretty_print
+ self.trim_whitespace = trim_whitespace
+ self.use_ellide = use_ellide
+ self.annotation_handlers = self.load_handlers()
+ self.info = self.load_server()
+
+ if self.pretty_print:
+ self.graph = Graph()
+ self.graph.bind("wd", "http://www.wikidata.org/prop/direct/")
+ self.graph.bind(
+ "ome", "http://www.openmicroscopy.org/rdf/2016-06/ome_core/"
+ )
+ self.graph.bind(
+ "ome-xml", "http://www.openmicroscopy.org/Schemas/OME/2016-06#"
+ ) # FIXME
+ self.graph.bind("omero", "http://www.openmicroscopy.org/TBD/omero/")
+ # self.graph.bind("xs", XMLSCHEMA)
+ # TODO: Allow handlers to register namespaces
+
+ def load_handlers(self) -> Handlers:
+ annotation_handlers: Handlers = []
for ep in entrypoints.get_group_all("omero_rdf.annotation_handler"):
ah_loader = ep.load()
- self.annotation_handlers.append(ah_loader(self))
+ annotation_handlers.append(ah_loader(self))
# We know there are some built in handlers
- assert len(self.annotation_handlers) >= 1
+ assert len(annotation_handlers) >= 1
+ return annotation_handlers
+ def load_server(self) -> Any:
# Attempt to auto-detect server
comm = self.gateway.c.getCommunicator()
- self.info = self.gateway.c.getRouter(comm).ice_getEndpoints()[0].getInfo()
+ return self.gateway.c.getRouter(comm).ice_getEndpoints()[0].getInfo()
def get_identity(self, _type: str, _id: Any) -> URIRef:
- if _type.endswith("I"):
+ if _type.endswith("I") and _type != ("ROI"):
_type = _type[0:-1]
return URIRef(f"https://{self.info.host}/{_type}/{_id}")
@@ -129,37 +158,81 @@ def get_key(self, key: str) -> Optional[URIRef]:
def get_type(self, data: Data) -> str:
return data.get("@type", "UNKNOWN").split("#")[-1]
- def ellide(self, v: Any) -> Literal:
+ def literal(self, v: Any) -> Literal:
+ """
+ Prepare Python objects for use as literals
+ """
if isinstance(v, str):
v = str(v)
- if len(v) > 50:
+ if self.use_ellide and len(v) > 50:
v = f"{v[0:24]}...{v[-20:-1]}"
+ elif v.startswith(" ") or v.endswith(" "):
+ if self.trim_whitespace:
+ v = v.strip()
+ else:
+ logging.warning(
+ "string has whitespace that needs trimming: '%s'", v
+ )
return Literal(v)
- def __call__(self, o: BlitzObjectWrapper) -> None:
- c = o._obj.__class__
+ def get_class(self, o):
+ if isinstance(o, IObject):
+ c = o.__class__
+ else: # Wrapper
+ c = o._obj.__class__
+ return c
+
+ def __call__(self, o: BlitzObjectWrapper) -> URIRef:
+ c = self.get_class(o)
encoder = get_encoder(c)
if encoder is None:
raise Exception(f"unknown: {c}")
else:
data = encoder.encode(o)
- self.handle(data)
+ return self.handle(data)
- def handle(self, data: Data) -> None:
+ def handle(self, data: Data) -> URIRef:
"""
- TODO: Add quad representation as an option
+ Parses the data object into RDF triples.
+
+ Returns the id for the data object itself
"""
+ # TODO: Add quad representation as an option
output: Triple
- for output in self.rdf(data):
- if output:
- s, p, o = output
- if None in (s, p, o):
- logging.debug("skipping None value: %s %s %s", s, p, o)
+
+ str_id = data.get("@id")
+ if not str_id:
+ raise Exception(f"missing id: {data}")
+
+ # TODO: this call is likely redundant
+ _type = self.get_type(data)
+ _id = self.get_identity(_type, str_id)
+
+ for triple in self.rdf(_id, data):
+ if triple:
+ if None in triple:
+ logging.debug("skipping None value: %s %s %s", triple)
else:
- print(f"""{s.n3()}\t{p.n3()}\t{o.n3()} .""")
+ self.emit(triple)
+
+ return _id
+
+ def emit(self, triple: Triple):
+ if self.pretty_print:
+ self.graph.add(triple)
+ else:
+ # Streaming
+ s, p, o = triple
+ print(f"""{s.n3()}\t{p.n3()}\t{o.n3()} .""")
+
+ def close(self):
+ if self.pretty_print:
+ print(self.graph.serialize())
def rdf(
- self, data: Data, _id: Optional[Subj] = None
+ self,
+ _id: Subj,
+ data: Data,
) -> Generator[Triple, None, None]:
_type = self.get_type(data)
@@ -174,21 +247,18 @@ def rdf(
)
# End workaround
- if not _id:
- str_id = data.get("@id")
- if not str_id:
- raise Exception(f"missing id: {data}")
- _id = self.get_identity(_type, str_id)
- if _id in self.cache:
- logging.debug("# skipping previously seen %s", _id)
- return
- else:
- self.cache.add(_id)
+ if _id in self.cache:
+ logging.debug("# skipping previously seen %s", _id)
+ return
+ else:
+ self.cache.add(_id)
for k, v in sorted(data.items()):
- if k in ("@type", "@id", "omero:details", "Annotations"):
- # Types that we want to omit fo
+ if k == "@type":
+ yield (_id, RDF.type, URIRef(v))
+ elif k in ("@id", "omero:details", "Annotations"):
+ # Types that we want to omit for now
pass
else:
@@ -200,28 +270,38 @@ def rdf(
if isinstance(v, dict):
# This is an object
if "@id" in v:
- # With an identity, use a reference
- v_type = self.get_type(v)
- val = self.get_identity(v_type, v["@id"])
- yield (_id, key, val)
- yield from self.rdf(v)
+ yield from self.yield_object_with_id(_id, key, v)
else:
# Without an identity, use a bnode
# TODO: store by value for re-use?
bnode = self.get_bnode()
yield (_id, key, bnode)
- yield from self.rdf(v, _id=bnode)
+ yield from self.rdf(bnode, v)
elif isinstance(v, list):
# This is likely the [[key, value], ...] structure?
+ # can also be shapes
for item in v:
- bnode = self.get_bnode()
- # TODO: KVPs need ordering info
- yield (_id, URIRef(f"{self.OME}Map"), bnode)
- yield (bnode, URIRef(f"{self.OME}Key"), self.ellide(item[0]))
- yield (bnode, URIRef(f"{self.OME}Value"), self.ellide(item[1]))
+ if isinstance(item, dict) and "@id" in item:
+ yield from self.yield_object_with_id(_id, key, item)
+ elif isinstance(item, list) and len(item) == 2:
+ bnode = self.get_bnode()
+ # TODO: KVPs need ordering info, also no use of "key" here.
+ yield (_id, URIRef(f"{self.OME}Map"), bnode)
+ yield (
+ bnode,
+ URIRef(f"{self.OME}Key"),
+ self.literal(item[0]),
+ )
+ yield (
+ bnode,
+ URIRef(f"{self.OME}Value"),
+ self.literal(item[1]),
+ )
+ else:
+ raise Exception(f"unknown list item: {item}")
else:
- yield (_id, key, self.ellide(v)) # TODO: Use Literal
+ yield (_id, key, self.literal(v))
# Special handling for Annotations
annotations = data.get("Annotations", [])
@@ -236,104 +316,147 @@ def rdf(
break
if not handled: # TODO: could move to a default handler
- yield (
- _id,
- URIRef(f"{self.OME}annotation"),
- self.get_identity("AnnotationTBD", annotation["@id"]),
- )
- yield from self.rdf(annotation)
+ aid = self.get_identity("AnnotationTBD", annotation["@id"])
+ yield (_id, URIRef(f"{self.OME}annotation"), aid)
+ yield from self.rdf(aid, annotation)
+
+ def yield_object_with_id(self, _id, key, v):
+ """
+ Yields a link to the object as well as its representation.
+ """
+ v_type = self.get_type(v)
+ val = self.get_identity(v_type, v["@id"])
+ yield (_id, key, val)
+ yield from self.rdf(_id, v)
class RdfControl(BaseControl):
def _configure(self, parser: Parser) -> None:
parser.add_login_arguments()
+ rdf_type = ProxyStringType("Image")
+ rdf_help = "Object to be exported to RDF"
+ parser.add_argument("target", type=rdf_type, nargs="*", help=rdf_help)
parser.add_argument(
- "--force",
- "-f",
- default=False,
+ "--pretty",
action="store_true",
- help="Actually do something. Default: false.",
+ default=False,
+ help="Print in NT, prevents streaming",
+ )
+ parser.add_argument(
+ "--ellide", action="store_true", default=False, help="Shorten strings"
)
parser.add_argument(
- "--block-size",
- "-b",
- default=100,
+ "--trim-whitespace",
action="store_true",
- help="Actually do something. Default: false.",
+ default=False,
+ help="Remove leading and trailing whitespace from literals",
)
- rdf_type = ProxyStringType("Image")
- rdf_help = "Object to be exported to RDF"
- parser.add_argument("target", type=rdf_type, help=rdf_help)
parser.set_defaults(func=self.action)
@gateway_required
def action(self, args: Namespace) -> None:
- self.descend(self.gateway, args.target, batch=1)
+ handler = Handler(
+ self.gateway,
+ pretty_print=args.pretty,
+ use_ellide=args.ellide,
+ trim_whitespace=args.trim_whitespace,
+ )
+ self.descend(self.gateway, args.target, handler)
+ handler.close()
+ # TODO: move to handler?
def descend(
self,
gateway: BlitzGateway,
target: IObject,
- batch: int = 100,
- handler: Optional[Handler] = None,
- ) -> None:
+ handler: Handler,
+ ) -> URIRef:
"""
Copied from omero-cli-render. Should be moved upstream
"""
- if handler is None:
- handler = Handler(gateway)
-
if isinstance(target, list):
for x in target:
- self.descend(gateway, x, batch)
+ randomid = self.descend(gateway, x, handler)
+ return randomid # TODO return a list?
+
elif isinstance(target, Screen):
scr = self._lookup(gateway, "Screen", target.id)
- handler(scr)
+ scrid = handler(scr)
for plate in scr.listChildren():
- self.descend(gateway, plate._obj, batch)
+ pltid = self.descend(gateway, plate._obj, handler)
+ handler.emit((pltid, DCTERMS.isPartOf, scrid))
+ handler.emit((scrid, DCTERMS.hasPart, pltid))
for annotation in scr.listAnnotations(None):
handler(annotation)
+ return scrid
+
elif isinstance(target, Plate):
plt = self._lookup(gateway, "Plate", target.id)
- handler(plt)
+ pltid = handler(plt)
for annotation in plt.listAnnotations(None):
handler(annotation)
for well in plt.listChildren():
- handler(well) # No descend
+ wid = handler(well) # No descend
+ handler.emit((wid, DCTERMS.isPartOf, pltid))
for idx in range(0, well.countWellSample()):
img = well.getImage(idx)
- handler(img.getPrimaryPixels())
- handler(img) # No descend
+ imgid = self.descend(gateway, img._obj, handler)
+ handler.emit((imgid, DCTERMS.isPartOf, wid))
+ handler.emit((wid, DCTERMS.hasPart, imgid))
+ return pltid
elif isinstance(target, Project):
prj = self._lookup(gateway, "Project", target.id)
- handler(prj)
+ prjid = handler(prj)
for annotation in prj.listAnnotations(None):
handler(annotation)
for ds in prj.listChildren():
- self.descend(gateway, ds._obj, batch)
+ dsid = self.descend(gateway, ds._obj, handler)
+ handler.emit((dsid, DCTERMS.isPartOf, prjid))
+ handler.emit((prjid, DCTERMS.hasPart, dsid))
+ return prjid
elif isinstance(target, Dataset):
ds = self._lookup(gateway, "Dataset", target.id)
- handler(ds)
+ dsid = handler(ds)
for annotation in ds.listAnnotations(None):
handler(annotation)
for img in ds.listChildren():
- handler(img) # No descend
- handler(img.getPrimaryPixels())
- for annotation in img.listAnnotations(None):
- handler(annotation)
+ imgid = self.descend(gateway, img._obj, handler)
+ handler.emit((imgid, DCTERMS.isPartOf, dsid))
+ handler.emit((dsid, DCTERMS.hasPart, imgid))
+ return dsid
elif isinstance(target, Image):
img = self._lookup(gateway, "Image", target.id)
- handler(img)
- handler(img.getPrimaryPixels())
+ imgid = handler(img)
+ pixid = handler(img.getPrimaryPixels())
+ handler.emit((pixid, DCTERMS.isPartOf, imgid))
+ handler.emit((imgid, DCTERMS.hasPart, pixid))
for annotation in img.listAnnotations(None):
+ img._loadAnnotationLinks()
handler(annotation)
+ for roi in self._get_rois(gateway, img):
+ handler(roi)
+ return imgid
else:
- self.ctx.die(111, "TBD: %s" % target.__class__.__name__)
+ self.ctx.die(111, "unknown target: %s" % target.__class__.__name__)
+
+ def _get_rois(self, gateway, img):
+ params = ParametersI()
+ params.addId(img.id)
+ query = """select r from Roi r
+ left outer join fetch r.annotationLinks as ral
+ left outer join fetch ral.child as rann
+ left outer join fetch r.shapes as s
+ left outer join fetch s.annotationLinks as sal
+ left outer join fetch sal.child as sann
+ where r.image.id = :id"""
+ return gateway.getQueryService().findAllByQuery(
+ query, params, {"omero.group": str(img.details.group.id.val)}
+ )
def _lookup(
self, gateway: BlitzGateway, _type: str, oid: int
| Class definitions in output
Running the example mentioned in #1, I was wondering why there are hardly any type defining statements, such as
```
<http://ome..../Image/12345> a <http://www.openmicroscopy.org/rdf/2016-06/ome_core/ImageFile>
...
<https://ome.evolbio.mpg.de/MapAnnotation/1985015> a <http://www.openmicroscopy.org/rdf/2016-06/ome_core/Map>
...
```
This would enable queries like
```sparql
select * where {
?img a ome_core:ImageFile;
}
```
Implicitly, the [ome_core ontology](https://gitlab.com/openmicroscopy/incubator/ome-owl/-/blob/master/ontology/owl/ome_core/ome_core.owl.ttl) induces these type definitions, so it would be enough to append ome_core.owl.ttl to the output (maybe make it optional via a command line flag).
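For reference, a minimal rdflib sketch of emitting such an explicit type statement, which is what the `yield (_id, RDF.type, URIRef(v))` branch in the patch above does (the subject URI here is only illustrative):
```python
from rdflib import Graph, URIRef
from rdflib.namespace import RDF

graph = Graph()
subject = URIRef("https://example.org/Image/12345")  # illustrative subject
image_type = URIRef("http://www.openmicroscopy.org/rdf/2016-06/ome_core/ImageFile")

# Emitting rdf:type makes queries like `?img a ome_core:ImageFile` possible.
graph.add((subject, RDF.type, image_type))
print(graph.serialize(format="turtle"))
```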
| :+1: for adding more type definitions but primarily it was unclear to me (and still is, to be honest) if we were going down the ome_core ontology route, or starting from new LinkML-generated base classes. See also: https://github.com/ome/ome-model/pull/180 | 2024-02-09T10:49:37 | 0.0 | [] | []
||
iamleot/transferwee | iamleot__transferwee-43 | 39f1dcdf358327c64f799a516fff9d7a6ed162fc | diff --git a/transferwee.py b/transferwee.py
index ac3b6f1..1f989c0 100755
--- a/transferwee.py
+++ b/transferwee.py
@@ -191,18 +191,19 @@ def _prepare_session() -> Optional[requests.Session]:
requests.
"""
s = requests.Session()
- s.headers.update({"User-Agent": WETRANSFER_USER_AGENT})
- r = s.get("https://wetransfer.com/")
- m = re.search('name="csrf-token" content="([^"]+)"', r.text)
- if not m:
- logger.error(f"Could not find any csrf-token")
- return None
s.headers.update(
{
- "x-csrf-token": m.group(1),
+ "User-Agent": WETRANSFER_USER_AGENT,
"x-requested-with": "XMLHttpRequest",
}
)
+ r = s.get("https://wetransfer.com/")
+ m = re.search('name="csrf-token" content="([^"]+)"', r.text)
+ if m:
+ logger.debug(f"Setting x-csrf-token header to {m.group(1)}")
+ s.headers.update({"x-csrf-token": m.group(1)})
+ else:
+ logger.debug(f"Could not find any csrf-token")
return s
| Downloads not working - Could not find any csrf-token in _prepare_session
It seems like WeTransfer changed something in the past couple of days and download_url now raises ConnectionError('Could not prepare session').
_prepare_session reports "Could not find any csrf-token".
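A minimal sketch of the more tolerant session setup that the patch above ends up using (the user agent string here is a placeholder; transferwee uses its own constant):
```python
import re
import requests

def prepare_session() -> requests.Session:
    s = requests.Session()
    s.headers.update({
        "User-Agent": "Mozilla/5.0",  # placeholder
        "x-requested-with": "XMLHttpRequest",
    })
    r = s.get("https://wetransfer.com/")
    m = re.search('name="csrf-token" content="([^"]+)"', r.text)
    if m:
        # Some regions still serve the meta tag; use it when present.
        s.headers.update({"x-csrf-token": m.group(1)})
    # Otherwise rely on the cookies already stored on the session.
    return s
```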
| The problem seems to be that wetransfer.com no longer has a csrf token to be parsed at line 192 in [_prepare_session](https://github.com/iamleot/transferwee/blob/c5f6984c335ff8b64dea50cb82660aa67da748a9/transferwee.py#LL192C20-L192C37)
Hello! Can you please share a possible way to reproduce it?
At least yesterday and last night all tests passed successfully (against current Git HEAD).
I've also rechecked via `tests/check.sh` now and I could not reproduce it.
I get the same error running the check script.
```
(base) â tests git:(master) ./check.sh
Creating a test file...
Uploading the test file...
Checking that all files exists
Checking for no duplicate filenames
Preparing to upload
Could not find any csrf-token
Traceback (most recent call last):
File "/transferwee_orig/transferwee/transferwee.py", line 566, in <module>
print(upload(args.files, args.n, args.m, args.f, args.t))
File "/transferwee_orig/transferwee/transferwee.py", line 485, in upload
raise ConnectionError('Could not prepare session')
ConnectionError: Could not prepare session
```
Looking at the wetransfer.com website source I see no csrf element to parse.
Buuut...I am connecting from Sweden. Switching to VPN and connecting from the US the csrf token is there. Are they rolling out a new website by region?
I have no idea, sorry! Everything that I know about wetransfer.com was reverse engineered!
That's interesting though! Unfortunately, until I'm able to reproduce it I probably can't easily help.
Tested various methods and it seems that cookies are used instead of the csrf meta element in some cases. Continuing with the fetched session makes the check script work again. | 2023-05-20T20:09:19 | 0.0 | [] | [] |
||
ChristianTremblay/BAC0 | ChristianTremblay__BAC0-321 | 6e6651fa143b9faf5f3e98419bfe92b304a69bc7 | diff --git a/BAC0/core/functions/TimeSync.py b/BAC0/core/functions/TimeSync.py
index 743f4337..8d85a969 100644
--- a/BAC0/core/functions/TimeSync.py
+++ b/BAC0/core/functions/TimeSync.py
@@ -156,7 +156,7 @@ def local_date(self):
def utcOffset(self):
"Returns UTC offset in minutes"
- return self.now.astimezone().utcoffset().total_seconds() / 60
+ return round(self.now.astimezone().utcoffset().total_seconds() / 60)
def is_dst(self):
return self.timezone.dst(self.now) != dt.timedelta(0)
| Develop
Waited enough.... time to sync master
| 2022-04-15T21:38:00 | 0.0 | [] | [] |
|||
pypa/installer | pypa__installer-201 | 6d4cf8394bbf6edf7d6f147280b1ec84f1504913 | diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index e50ab932..cc7fe396 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -20,7 +20,7 @@ jobs:
strategy:
matrix:
os: [Windows, Ubuntu, MacOS]
- python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"]
+ python-version: ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"]
include:
# Only run PyPy jobs, on Ubuntu.
- os: Ubuntu
@@ -30,9 +30,10 @@ jobs:
- uses: actions/checkout@v3
# Get Python to test against
- - uses: actions/setup-python@v4
+ - uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
+ allow-prereleases: true
# Setup pip's cache
- name: Save date (for cache)
diff --git a/noxfile.py b/noxfile.py
index a690c59b..6a69ccec 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -22,7 +22,7 @@ def lint(session):
session.run("pre-commit", "run", "--all-files", *args)
-@nox.session(python=["3.7", "3.8", "3.9", "3.10", "3.11", "pypy3"])
+@nox.session(python=["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13", "pypy3"])
def test(session):
session.install(".")
session.install("-r", "tests/requirements.txt")
@@ -42,7 +42,7 @@ def test(session):
)
-@nox.session(python=["3.7", "3.8", "3.9", "3.10", "3.11", "pypy3"])
+@nox.session(python=["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13", "pypy3"])
def doctest(session):
session.install(".")
session.install("-r", "docs/requirements.txt")
diff --git a/src/installer/scripts.py b/src/installer/scripts.py
index d18060bd..c9f96b41 100644
--- a/src/installer/scripts.py
+++ b/src/installer/scripts.py
@@ -3,9 +3,19 @@
import io
import os
import shlex
+import sys
import zipfile
-from importlib.resources import read_binary
-from typing import TYPE_CHECKING, Mapping, Optional, Tuple
+from types import ModuleType
+from typing import TYPE_CHECKING, Mapping, Optional, Tuple, Union
+
+if sys.version_info >= (3, 9): # pragma: no cover
+ from importlib.resources import files
+
+ def read_binary(package: Union[str, ModuleType], file_path: str) -> bytes:
+ return (files(package) / file_path).read_bytes()
+
+else: # pragma: no cover
+ from importlib.resources import read_binary
from installer import _scripts
| scripts.py utilizes the deprecated read_binary function from importlib.resources.
When building for Fedora I noticed a deprecation warning:
```
DeprecationWarning: read_binary is deprecated. Use files() instead. Refer to https://importlib-resources.readthedocs.io/en/latest/using.html#migrating-from-legacy for migration advice.
```
| As mentioned in the docs it links to, you cannot really take advantage of this deprecation warning without dropping compat for 50% of supported python versions. Honestly it always seemed odd to me that it's a warning at all (but the stability of importlib.resources is a separate topic).
The current spot of use is:
https://github.com/pypa/installer/blob/8105b1d3a20f19d5a9026ede363015fbbd644ebc/src/installer/scripts.py#L6
I guess we could do something like:
```py
if sys.version_info >= (3, 9):
    from importlib.resources import files

    def read_binary(package, file_path):
        return (files(package) / file_path).read_bytes()
else:
    from importlib.resources import read_binary
```
It's dumb that I'll have to do this, but enough has been said about how messily the `importlib.resources`/`importlib.metadata` have handled backwards compatibility.
This has become the case with Python 3.13 from which read_binary was removed. We are already hit by this as downstream packagers in Fedora Linux during the early integration with Python 3.13 alpha 1. | 2023-11-23T15:42:52 | 0.0 | [] | [] |
||
0b01001001/spectree | 0b01001001__spectree-153 | 1d905e80834830be1bd7941d174dfcbdbf30eb3f | diff --git a/spectree/spec.py b/spectree/spec.py
index 15249f4f..c923eb99 100644
--- a/spectree/spec.py
+++ b/spectree/spec.py
@@ -255,15 +255,15 @@ def _generate_spec(self):
"paths": {**routes},
"components": {
"schemas": {**self.models, **self._get_model_definitions()},
- "securitySchemes": {
- scheme.name: scheme.data.dict(exclude_none=True, by_alias=True)
- for scheme in self.config.SECURITY_SCHEMES
- }
- if self.config.SECURITY_SCHEMES
- else {},
},
}
+ if self.config.SECURITY_SCHEMES:
+ spec["components"]["securitySchemes"] = {
+ scheme.name: scheme.data.dict(exclude_none=True, by_alias=True)
+ for scheme in self.config.SECURITY_SCHEMES
+ }
+
if self.config.SECURITY:
spec["security"] = [
{security_name: security_config}
| [BUG] Redoc is showing a rogue "Authentication" section
**Describe the bug**
In the section headers in the left navigation, Redoc shows a section called Authentication, even if no Authentication tags are defined. If endpoints are defined with an Authentication tag, another Authentication section is created.
**To Reproduce**
Steps to reproduce the behavior:
- Create a spectree spec
-
**Expected behavior**
No undefined sections
**Python Information (please complete the following information):**
- Python==3.8.10
- spectree>=0.5.1
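For illustration, a sketch of the behaviour the patch above introduces: `securitySchemes` is only added to `components` when schemes are actually configured, so Redoc no longer sees an empty (but present) key:
```python
spec = {"components": {"schemas": {}}}

security_schemes = []  # whatever config.SECURITY_SCHEMES provides
if security_schemes:
    # The key is omitted entirely when there is nothing to document.
    spec["components"]["securitySchemes"] = {
        scheme.name: scheme.data.dict(exclude_none=True, by_alias=True)
        for scheme in security_schemes
    }
```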
| 2021-06-18T14:00:15 | 0.0 | [] | [] |
|||
mnbvc-parallel-corpus-team/parallel_corpus_mnbvc | mnbvc-parallel-corpus-team__parallel_corpus_mnbvc-34 | d5663d97bb0ef5061a98b3000e646800364560dd | diff --git a/.gitignore b/.gitignore
index fa32fbc..89c3236 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,6 +7,11 @@ downloaded_websites
download_pdf
error_url.txt
+cache_*
+wandb
+*.json.lock
+gpt_cache
+batch_cache
# C extensions
*.so
diff --git a/alignment/README.md b/alignment/README.md
index eed99c5..623224b 100644
--- a/alignment/README.md
+++ b/alignment/README.md
@@ -50,3 +50,30 @@ The module outputs the following information:
- A classification report providing precision, recall, F1-score, and support for softline and non-softline instances.
+## batch_sequential_for_one_file.py
+
+example: `python batch_sequential_for_one_file.py --api_key=sk-xxxxxx --dataset_index=0~15293`
+
+
+options:
+
+ --api_key API_KEY openai api key
+ --dataset_index DATASET_INDEX File index: give an integer in 0~9000; each integer corresponds to the task for one file
+
+Before running for real, we recommend first running the script single-threaded once on the index-0 task:
+
+```
+python batch_sequential_for_one_file.py --key=[Your_Key] --dataset_index=0
+```
+
+This run downloads the HF dataset and caches it in the working directory, so that later requests do not keep hitting HF.
+
+After that, the script can be run in parallel; we recommend executing it by spawning new processes from the command line:
+
+```python
+os.system('python batch_sequential_for_one_file.py --key=sk-xxxxxx --dataset_index=0~15293')
+```
+
+When each file finishes, `batch_cache/done` in the local working directory will contain the index of every file that has been processed, together with its segmentation result.
+
+If a file's task cannot be completed under extreme conditions (network outage, OpenAI server errors, overload errors, etc.), the script raises an exception and aborts immediately with a non-zero return code. In that case the orchestrating script that spawned it should note the file index, set that task aside, and let the other tasks continue; the files that were not handled correctly can be revisited in the next run.
diff --git a/alignment/batch_detector.py b/alignment/batch_detector.py
index 8d9b526..4f10a97 100644
--- a/alignment/batch_detector.py
+++ b/alignment/batch_detector.py
@@ -3,8 +3,8 @@
from pathlib import Path
import json
-from text_segmenter import HardLineBreakDetector
-import utils
+from alignment.text_segmenter import HardLineBreakDetector
+import alignment.utils as utils
class GPTBatchDetector(HardLineBreakDetector):
@@ -36,7 +36,6 @@ def create_batches(self, lines: list[str]) -> list[list[str]]:
words = line.split()
# Estimate the token count for the current line
line_token_count = len(words) * (100 / 75)
-
# Check if adding this line would exceed the token limit
if token_count + line_token_count > self.token_limit:
# If so, finish the current batch and start a new one
diff --git a/alignment/batch_sequential_detector.py b/alignment/batch_sequential_detector.py
index 2091fab..46b0c47 100644
--- a/alignment/batch_sequential_detector.py
+++ b/alignment/batch_sequential_detector.py
@@ -6,6 +6,7 @@
from pathlib import Path
import json
import re
+from difflib import SequenceMatcher
import tiktoken
import pylcs
@@ -16,7 +17,9 @@
LCSTokenInfo = namedtuple('LCSTokenInfo', ('token', 'length', 'source_line_id'))
class GPTBatchSequentialDetector(HardLineBreakDetector):
- def __init__(self, name, cache_dir, token_limit=500, use_proxy=False, re_ask_times=3):
+ LEADING_NOISE_SCAN_LINE_LIMIT = 12 #
+
+ def __init__(self, name, cache_dir, token_limit=500, use_proxy=False, re_ask_times=3, ignore_leading_noise_lines=True):
super().__init__(name)
self.cache_dir = Path(cache_dir)
self.cache_dir.mkdir(parents=True, exist_ok=True)
@@ -25,6 +28,7 @@ def __init__(self, name, cache_dir, token_limit=500, use_proxy=False, re_ask_tim
self.use_proxy = use_proxy
self.encoder = tiktoken.encoding_for_model("gpt-3.5-turbo")
self.re_ask_times = re_ask_times
+ self.ignore_leading = ignore_leading_noise_lines
@staticmethod
def clearup_output(raw_output_from_chatgpt: str) -> list[str]:
@@ -133,7 +137,7 @@ def lcs_sequence_alignment(input_lines: list[str] , output_lines: list[str]) ->
return align_map, input_hit_rate, output_hit_rate
@staticmethod
- def align_and_drop_bad_alignment(input_lines: list[str] | str, output_lines: list[str] | str) -> dict[int, Tuple[int, int]]:
+ def align_and_drop_bad_alignment(input_lines: list[str] | str, output_lines: list[str] | str, drop_last_paragraph=True) -> dict[int, Tuple[int, int]]:
"""
This function is a wrapper around lcs_sequence_alignment, used to drop paragraphs that it aligned *poorly*.
Specifically:
@@ -145,6 +149,7 @@ def align_and_drop_bad_alignment(input_lines: list[str] | str, output_lines: lis
Args:
input_lines(str): the input passage; it must contain no empty lines
output_lines(str): the passage as re-paragraphed by ChatGPT; it must contain no empty lines
+ drop_last_paragraph(bool): whether to drop the last large paragraph produced; if the current batch already covers all remaining text, the last paragraph does not need to be dropped
Returns:
align_map(dict[int, Tuple[int, int]]): maps output line numbers to the corresponding input line numbers; see the example in lcs_sequence_alignment
@@ -175,7 +180,7 @@ def align_and_drop_bad_alignment(input_lines: list[str] | str, output_lines: lis
for p, i in enumerate(output_hit_rate):
output_hit_rate[p] /= sum(map(len, output_lines[p].split()))
- if len(align_map) > 1:
+ if len(align_map) > 1 and drop_last_paragraph:
align_map.pop(max(align_map.keys())) # drop the last large paragraph, to avoid a paragraph that was cut off mid-way
# To keep dropped paragraphs from hurting accuracy, any output_line_id whose match rate is below 60% is discarded outright
@@ -187,7 +192,7 @@ def align_and_drop_bad_alignment(input_lines: list[str] | str, output_lines: lis
return align_map
@staticmethod
- def construct_segment_list_from_output_text(raw_text: str, output_text: str, use_identical_mapping_when_failure=False) -> list[Tuple[int, int]]:
+ def construct_segment_list_from_output_text(raw_text: str, output_text: str, use_identical_mapping_when_failure=False, drop_last_paragraph=True) -> list[Tuple[int, int]]:
"""
ä»è¾åºä¸æé 段è½åºé´è¡¨ã
use_identical_mapping_when_failureåæ°ç¨äºæ§å¶æ¯å¦å¨output_textè·è¾å
¥å®å
¨
@@ -211,7 +216,7 @@ def construct_segment_list_from_output_text(raw_text: str, output_text: str, use
[[0, 0], [1, 2], [3, 4]]
"""
- align_map = GPTBatchSequentialDetector.align_and_drop_bad_alignment(raw_text, GPTBatchSequentialDetector.clearup_output(output_text))
+ align_map = GPTBatchSequentialDetector.align_and_drop_bad_alignment(raw_text, GPTBatchSequentialDetector.clearup_output(output_text), drop_last_paragraph)
if len(align_map) == 0:
if use_identical_mapping_when_failure:
# å¦æåå¤éé®é½æ²¡æåæ³è§£å³ï¼å°±ä»¤æ¢è¡åæ ·è¿åï¼è¿éå¤çæ¹å¼æ¯æé ä¸ä¸ªæçæ å°è¡¨ä½ä¸ºæ¿ä»£ï¼å¦[[0, 0], [1, 1], [2, 2], [3, 3]]
@@ -219,7 +224,7 @@ def construct_segment_list_from_output_text(raw_text: str, output_text: str, use
return list(align_map.values())
- def align_gpt_linebreak_detection_request(self, raw_text: str, record_id: str, batch_index: int) -> list[Tuple[int, int]]:
+ def align_gpt_linebreak_detection_request(self, raw_text: str, record_id: str, batch_index: int, drop_last_paragraph=True) -> list[Tuple[int, int]]:
"""
Sends a request to the GPT-3.5 API to detect hard line breaks in the given text,
and align the given text to its output text on the fly.
@@ -231,9 +236,10 @@ def align_gpt_linebreak_detection_request(self, raw_text: str, record_id: str, b
raw_text (str): The raw text to be processed.
record_id (int): The unique id of the record.
batch_index (int): The index of the batch.
+ drop_last_paragraph (bool): set to False if the current batch is the last batch, so that the last paragraph will not be dropped.
Returns:
- dict[int, set[int]]: The aligned paragragh group, indicating a output line refers to which input lines.
+ list[Tuple[int, int]]: The aligned paragragh group intervals indicating a output line refers to which input lines.
"""
filename = self.cache_dir / f'record_{record_id}_processed_batch_{batch_index}.json'
@@ -241,7 +247,7 @@ def align_gpt_linebreak_detection_request(self, raw_text: str, record_id: str, b
for re_ask_time in range(self.re_ask_times):
output_text = utils.gpt_detect_hard_line_breaks(raw_text, use_proxy=self.use_proxy)
segment_list = GPTBatchSequentialDetector.construct_segment_list_from_output_text(raw_text, output_text,
- use_identical_mapping_when_failure=re_ask_time == self.re_ask_times - 1)
+ re_ask_time == self.re_ask_times - 1, drop_last_paragraph)
if len(segment_list) == 0: # è®°å½ä¸ä¸GPT说çè¡è¯ä»¥ä¾¿æ¥ååæ
with Path('unexpected_outputs.jsonl').open('a', encoding='utf-8') as f:
json.dump({'time': str(datetime.now()), 'record': record_id, 'batch': batch_index, 'input': raw_text, 'output': output_text})
@@ -254,7 +260,7 @@ def align_gpt_linebreak_detection_request(self, raw_text: str, record_id: str, b
with filename.open('r') as f:
output_text = json.load(f)
segment_list = GPTBatchSequentialDetector.construct_segment_list_from_output_text(raw_text, output_text,
- use_identical_mapping_when_failure=True)
+ True, drop_last_paragraph)
return segment_list
@@ -282,6 +288,74 @@ def generate_batch(self, lines: list[str], begin_lineid: int) -> str:
if buffer:
return buffer
+ def ignore_first_page_leading_noises(self, lines: list[str]) -> int:
+ """
+ 忽ç¥æé¦è¡çä¸äºçä¼¼é¦é¡µåªå£°çä¸è¥¿ï¼é¿å
第ä¸ä¸ªbatchæ段ææä¸å¥½ã
+
+ è¿éç»ä¸ä¸ªæ ·æ¬ï¼
+ United Nations E/2004/93
+ Economic and Social Council Distr.: General
+ 14 July 2004
+ Original: English
+ 04-42475 (E) 140704
+ *0442475*
+ Substantive session of 2004
+ New York, 28 June-23 July 2004
+ Agenda item 13 (a)
+
+ è¾å
¥ï¼lines: list[str]ï¼detectä¸ä¼ å
¥çlinesï¼ä¸åèµè¿°
+ è¾åºï¼intï¼è¡¨ç¤ºä¸ä¸ªè¡ä¸æ ï¼æ建议ä»æ¤è¡å¼å§å¾åæé 第ä¸ä¸ªbatch
+ """
+ for lineid, leading_line in enumerate(lines[:self.LEADING_NOISE_SCAN_LINE_LIMIT]):
+ if leading_line.lower().find("agenda") != -1: #
+ return lineid + 1 # Agenda item xxæ¯ä¸ä¸ªæ¯è¾å¥½çleading noiseåæ£æçåç线ï¼è¿éå¤æå12è¡æ没æ
+
+ # 以ä¸ä¸ç³»åéå
æ¹æ³ä¸ºå¤æä¸è¡æ¯å¦ä¸ºåªå£°è¡çè§å
+ def match_static_pattern(line: str) -> bool:
+ """å¹é
éæå符串è§å"""
+ for static_pattern in [
+ 'Economic and Social Council Distr.:',
+ 'United Nations',
+ ]:
+ if static_pattern in line or line in static_pattern: # å
å«å
³ç³»ï¼è®¤ä¸ºæ»¡è¶³è§å
+ return True
+ matcher = SequenceMatcher(a=static_pattern, b=leading_line, autojunk=False)
+ if matcher.ratio() > 0.7: # ç¸ä¼¼å
³ç³»ï¼è®¤ä¸ºæ»¡è¶³è§å
+ return True
+ return False
+
+ def match_re_pattern(line: str) -> bool:
+ """å¹é
æ£åè§å"""
+ for re_pattern in [
+ re.compile(r'\d{1,2} [a-zA-Z]+ \d{4}'), # æ¥æ
+ re.compile(r'Original: [a-zA-Z]+') # æºè¯è¨
+ ]:
+ if re.search(re_pattern, line):
+ return True
+ return False
+
+ def low_en_proportion(line: str) -> bool:
+ """è±è¯åæ¯å æ¯è§å"""
+ return len(line) * 0.5 > len(re.findall(r'[a-zA-Z]', line)) # è±è¯åæ¯å æ¯å°äºæ´è¡é¿åº¦ä¸å
+
+ def short_line(line: str) -> bool:
+ """è¡é¿åº¦è§å"""
+ return len(line) < 40
+
+ # æ们è¿ç»çä»ä¸è³ä¸ä¸è¡è¡å¹é
å·²æçè§åï¼ä¸æ¦æä¸è¡ä¸æ»¡è¶³è§åï¼ååé¢çè¡æ们认为已ç»è¾¾å°äºæ£æè¡ï¼ç´æ¥è¿å
+ for lineid, leading_line in enumerate(lines[:self.LEADING_NOISE_SCAN_LINE_LIMIT]):
+ if not (
+ match_static_pattern(leading_line) or
+ match_re_pattern(leading_line) or
+ low_en_proportion(leading_line) or
+ short_line(leading_line)
+ ):
+ return lineid
+
+ return self.LEADING_NOISE_SCAN_LINE_LIMIT # å¦æå12è¡é½çä¼¼åªå£°è¡ï¼åè¿å第12è¡
+
+
+
def detect(self, lines: list[str], record_id: str, **kwargs) -> list[bool]:
"""
Applies the GPT-3.5 detection technique to the given lines.
@@ -300,6 +374,10 @@ def detect(self, lines: list[str], record_id: str, **kwargs) -> list[bool]:
new_batch_begin_lineid = 0
batch_id = 0
+ if self.ignore_leading:
+ new_batch_begin_lineid = self.ignore_first_page_leading_noises(lines)
+ print(f'[{record_id}]first batch begin at:{new_batch_begin_lineid}')
+
while new_batch_begin_lineid < len(lines):
batch = self.generate_batch(lines, new_batch_begin_lineid) # å©ç¨å·²æçç»æçæinput_batch
batch_line_count = batch.count('\n') + 1
@@ -308,7 +386,8 @@ def detect(self, lines: list[str], record_id: str, **kwargs) -> list[bool]:
break
# è·åæ段åºé´è¡¨
- segment_list = self.align_gpt_linebreak_detection_request(batch, record_id, batch_id)
+ segment_list = self.align_gpt_linebreak_detection_request(batch, record_id, batch_id,
+ drop_last_paragraph=next_lineid < len(lines)) # å¦ææ¯æåä¸æ¹ï¼å°±ä¸è¦ä¸¢ææåä¸ä¸ªå¤§æ®µ
for l_border, r_border in segment_list:
detections[new_batch_begin_lineid + l_border:new_batch_begin_lineid + r_border] = [False] * (r_border - l_border) # æ¯ä¸ªæ®µè½çåºé´èµå¼ä¸ºFalse
diff --git a/alignment/batch_sequential_for_one_file.py b/alignment/batch_sequential_for_one_file.py
new file mode 100644
index 0000000..1dc1354
--- /dev/null
+++ b/alignment/batch_sequential_for_one_file.py
@@ -0,0 +1,58 @@
+import argparse
+import os
+import json
+from pathlib import Path
+import logging
+import datasets
+from batch_sequential_detector import GPTBatchSequentialDetector
+
+
+logging.basicConfig(level=logging.INFO)
+
+LOCAL_WORK_DIR = Path(f'{os.path.dirname(os.path.abspath(__file__))}/batch_cache')
+LOCAL_WORK_DIR.mkdir(exist_ok=True)
+
+DATASET_CACHE_DIR = LOCAL_WORK_DIR / 'dataset'
+DATASET_CACHE_DIR.mkdir(exist_ok=True)
+
+DETECTOR_CACHE_DIR = LOCAL_WORK_DIR / 'batch_sequential_cache_dir'
+DETECTOR_CACHE_DIR.mkdir(exist_ok=True)
+
+DONE_DIR = LOCAL_WORK_DIR / 'done'
+DONE_DIR.mkdir(exist_ok=True)
+
+
+def get_and_cache_dataset(path='ranWang/un_pdf_random_preprocessed', split='train'):
+ """æhfçä¸è¥¿cacheå°å·¥ä½ç®å½ï¼é²æ¢dnsé»æ导è´ä¸è½éªè¯æ¬å°ç¼å"""
+ try:
+ dataset = datasets.load_from_disk(DATASET_CACHE_DIR)
+ except:
+ dataset = datasets.load_dataset(path, split=split)
+ dataset.save_to_disk(DATASET_CACHE_DIR)
+ return dataset
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--api_key', type=str, help='openai api key')
+ parser.add_argument('--dataset_index', type=int, help='ç´æ¥ç»ä¸æ å§ï¼0~15293')
+
+ args = parser.parse_args()
+
+ if args.dataset_index != 0 and not args.dataset_index:
+ raise ValueError("dataset_index must input")
+
+ if not args.api_key:
+ raise ValueError("api_key must input")
+
+ single_file_data = get_and_cache_dataset()[args.dataset_index]
+ record = single_file_data['record']
+
+
+ os.environ['OPENAI_API_KEY'] = args.api_key
+
+ detector = GPTBatchSequentialDetector('', cache_dir=DETECTOR_CACHE_DIR.absolute(), use_proxy=False, ignore_leading_noise_lines=True) # å¦æéè¦ç¨å代è¿éuse_proxyæ¹True
+ is_hard_linebreak: list[bool] = detector.detect(single_file_data['en'].splitlines(), record_id=record)
+
+ with (DONE_DIR / f'{record}.list').open('w') as f:
+ json.dump(is_hard_linebreak, f)
diff --git a/alignment/utils.py b/alignment/utils.py
index e24bb70..129d37a 100644
--- a/alignment/utils.py
+++ b/alignment/utils.py
@@ -58,7 +58,8 @@ def gpt_detect_hard_line_breaks(line_break_text: str, use_proxy: bool = False, r
Raises:
ExceededContextLength: If the context length is exceeded.
- UnknownError: If an unknown error occurs.
+ UnknownError: If an OpenAI side unknown error occurs.
+ Exception: If other unexpected error occurs.
Returns:
str: The AI model's response.
@@ -84,18 +85,29 @@ def gpt_detect_hard_line_breaks(line_break_text: str, use_proxy: bool = False, r
},
timeout = 60 * 5, verify=False
)
- logging.info(response.text)
+ logging.debug(response.text)
try:
response_json = response.json()
except json.JSONDecodeError:
response_json = json.loads('{' + response.text)
-
if 'error' in response_json:
error = response_json['error']
if 'code' in error and error['code'] == 'invalid_request_error':
raise ExceededContextLength(error['message'])
elif error.get('type') == 'server_error' and 'overloaded' in error.get('message', ''):
raise ServerOverloadedError(error['message']) # è¿ä¸ªé误æ¯å¯ä»¥æ¥ä½å¹¶ä¸éè¿sleep and retryæ¥è§£å³ç
+ elif error.get('type') == 'billing_not_active': # Tokenè¿æ ç´æ¥ææ
+ logging.fatal(f"OpenAI API Key not active: {error}")
+ print(f"OpenAI API Key not active: {error}")
+ exit(1)
+ elif error.get('type') == 'invalid_request_error': # API Keyæ ææè
å·²æ¤åå¯è½å¼èµ·è¿ä¸ªé误
+ logging.fatal(f"Invalid request (API Key maybe Invalid): {error}")
+ print(f"Invalid request (API Key maybe Invalid): {error}")
+ exit(1)
+ elif error.get('type') == 'insufficient_quota': # API Keyé
é¢ç¨å®
+ logging.fatal(f"OpenAI API Key quota exceeded: {error}")
+ print(f"OpenAI API Key quota exceeded: {error}")
+ exit(1)
else:
raise UnknownError(error['message'])
@@ -109,6 +121,23 @@ def gpt_detect_hard_line_breaks(line_break_text: str, use_proxy: bool = False, r
else:
logging.error(f"Request failed after {retries} retries.")
raise e
+ except UnknownError as e: # sample: The server had an error while processing your request. Sorry about that!
+ if i < retries - 1: # i is zero indexed
+ logging.error(f"OpenAI side unknown error occurred: {str(e)}, retrying.")
+ time.sleep(2)
+ continue
+ else:
+ logging.error(f"OpenAI side unknown error occurred after {retries} retries: {str(e)}.")
+ raise e
+ except Exception as e:
+ if i < retries - 1: # in case of other unknown exception that prevent running
+ logging.error(f"Unexpected error occurred: {str(e)}, retrying.")
+ time.sleep(2)
+ continue
+ else:
+ logging.error(f"Unexpected error occurred after {retries} retries: {str(e)}.")
+ raise e
+
# wait 10 sec between each retry
time.sleep(10)
@@ -146,7 +175,7 @@ def find_closest_within_margin(target, candidates, margin):
return None, None
-def index_near_match(indice_true, indice_pred, margin):
+def index_near_match(indice_true, indice_pred, margin=5):
"""
This function identifies and matches indices in 'indice_pred' that are closest to indices in 'indice_true',
within a specified margin. The offset is corrected each time an index is matched. The function returns two
diff --git a/requirements.txt b/requirements.txt
index 52fefa8..3fcf0e6 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -5,4 +5,5 @@ lxml
wandb
scikit-learn
pylcs
-tiktoken
\ No newline at end of file
+tiktoken
+
| fix: make sure script can run
| 2023-06-16T10:29:27 | 0.0 | [] | [] |
|||
AnasAito/SkillNER | AnasAito__SkillNER-45 | 1e01372b90b141ec5852c30c0308f638f68f6933 | diff --git a/skillNer/matcher_class.py b/skillNer/matcher_class.py
index dd366a2..8a48483 100644
--- a/skillNer/matcher_class.py
+++ b/skillNer/matcher_class.py
@@ -328,7 +328,8 @@ def get_low_match_skills(
for match_id, start, end in matcher(doc):
id_ = matcher.vocab.strings[match_id]
-
+ # handle skill in the end of phrase
+ start = start if start < len(text_obj) else start - 1
if text_obj[start].is_matchable:
skills.append({'skill_id': id_+'_lowSurf',
'doc_node_value': str(doc[start:end]),
| IndexError while annotate certain skill
**Describe the bug**
while using skill_extractor.annotate(text) to annotate a certain skill, it returns an error
**To Reproduce**
Here are some examples to reproduce the behavior:
1.
text= "IDS/IPS sensors are collecting"
skill_extractor.annotate(text)
the error results from the skill "IDS"
| @GordonYeh39, Thanks for the snippet that reproduces the error.
There was a small bug that occurs when not handling accordingly low surface skill.
The bug is now fixed:

**P.S**: The fix will be taken into account in the next release.
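For context, the fix boils down to clamping the match start index so it cannot point one past the last token when a low-surface match ends the phrase. A rough sketch of that guard as a helper (the helper name is mine, not SkillNER's):
```python
def clamp_start(start: int, n_tokens: int) -> int:
    # A low-surface match that ends the phrase can report start == n_tokens,
    # which would index one past the last token; pull it back by one.
    return start if start < n_tokens else start - 1
```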
| 2022-04-25T19:07:10 | 0.0 | [] | [] |
||
radomirbosak/duden | radomirbosak__duden-139 | e4501cc948f483edaa690541d723862a1be009e6 | diff --git a/duden/word.py b/duden/word.py
index fdfae7d..3eb6c7d 100755
--- a/duden/word.py
+++ b/duden/word.py
@@ -66,7 +66,7 @@ def urlname(self):
"""
Return unique representation of the word used in duden.de urls
"""
- return self.soup.head.link.attrs['href'].split('/')[-1]
+ return self.soup.head.find('link', rel='canonical').attrs['href'].split('/')[-1]
@property
def revision_url(self):
| Fix urlname lookup
At least in the `Keyboard` word
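A small BeautifulSoup sketch of the lookup the patch switches to, i.e. reading the canonical `<link>` instead of whatever `<link>` happens to come first (the HTML snippet is made up):
```python
from bs4 import BeautifulSoup

html = """
<html><head>
  <link rel="stylesheet" href="/static/style.css">
  <link rel="canonical" href="https://www.duden.de/rechtschreibung/Keyboard">
</head><body></body></html>
"""
soup = BeautifulSoup(html, "html.parser")

# soup.head.link would return the stylesheet link here; filtering on
# rel="canonical" picks the right element regardless of ordering.
canonical = soup.head.find("link", rel="canonical")
print(canonical.attrs["href"].split("/")[-1])  # -> "Keyboard"
```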
| 2021-10-03T00:45:53 | 0.0 | [] | [] |
|||
DessimozLab/omamer | DessimozLab__omamer-31 | 6a2f536618eb47e1f328a66b3e454e2ef024e7d9 | diff --git a/omamer/merge_search.py b/omamer/merge_search.py
index c73cc2b..9482651 100644
--- a/omamer/merge_search.py
+++ b/omamer/merge_search.py
@@ -530,6 +530,8 @@ def generate():
}
df = pd.DataFrame(generate())
+ if len(df) == 0:
+ return df
# cast to pd dtype so that we can use pd.NA...
df["qseq_offset"] = df["qseq_offset"].astype("UInt32")
| Crash when outputting results
I am running FastOMA, and I have got a crash from omamer with the following error message:
```
Searching - 20000 queries in 6:35.2 (39.09 queries/s)
Traceback (most recent call last):
File "/app/bin/omamer", line 8, in <module>
sys.exit(main())
^^^^^^
File "/app/lib/python3.11/site-packages/omamer/main.py", line 289, in main
args.func(args)
File "/app/lib/python3.11/site-packages/omamer/_runners.py", line 180, in search
df = ms.merge_search(
^^^^^^^^^^^^^^^^
File "/app/lib/python3.11/site-packages/omamer/merge_search.py", line 489, in merge_search
return self.output_results(
^^^^^^^^^^^^^^^^^^^^
File "/app/lib/python3.11/site-packages/omamer/merge_search.py", line 535, in output_results
df["qseq_offset"] = df["qseq_offset"].astype("UInt32")
~~^^^^^^^^^^^^^^^
File "/app/lib/python3.11/site-packages/pandas/core/frame.py", line 4090, in __getitem__
indexer = self.columns.get_loc(key)
^^^^^^^^^^^^^^^^^^^^^^^^^
File "/app/lib/python3.11/site-packages/pandas/core/indexes/range.py", line 417, in get_loc
raise KeyError(key)
KeyError: 'qseq_offset'
/app/lib/python3.11/site-packages/tables/file.py:113: UnclosedFileWarning: Closing remaining open file: LUCA.h5
warnings.warn(UnclosedFileWarning(msg))
```
I would appreciate your help with this.
Botond
| Hi @ens-sb,
could it be that you are using an old version of the LUCA.h5 database? can you test with `omamer info --db LUCA.h5` which version of the database you are using? it should be 2.0.*
if that is not correct, please update the LUCA database from https://omabrowser.org/oma/current/ . The file should be ~7.8GB. Otherwise, I need to look a bit more into this.
Best wishes
Adrian
Hi @alpae ,
Thanks for the prompt feedback! I have used the LUCA.h5 which was automatically downloaded by FastOMA from https://omabrowser.org/All/LUCA.h5 . I guess this must be the latest one.
Also, all but one of the proteomes successfully completed the omamer search.
Best,
Botond
Hi Botond,
ok, if it works for many but one proteomes, there must be something special about this one... Are you able to share this proteome with us so that we can have a look into it more carefully?
Cheers Adrian
Hi Adrian,
I send attached the proteome which caused the trouble.
Botond
[spilosoma_lubricipeda_gca905220605v1.txt](https://github.com/DessimozLab/omamer/files/14791323/spilosoma_lubricipeda_gca905220605v1.txt)
oh wow, this is a nice edgecase situation. the output is generated in chunks of 10000 elements by default, and your proteome has exactly 20000 sequences. now, it tries to write an empty chunk, which fails brutally... ;-)
as a quick fix, you can specify a different chunksize, e.g. `--chunksize 9999` instead of 10000. I will fix now as well and will release a new version.
Thanks for reporting this.
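For illustration, the guard in the patch above amounts to returning the empty frame before any column-typed post-processing is attempted; a minimal sketch of that pattern (the function and sample data here are only illustrative):
```python
import pandas as pd

def output_chunk(rows):
    df = pd.DataFrame(rows)
    if len(df) == 0:
        # An empty final chunk has no columns at all, so any column access
        # (e.g. df["qseq_offset"].astype("UInt32")) would raise KeyError.
        return df
    df["qseq_offset"] = df["qseq_offset"].astype("UInt32")
    return df

print(output_chunk([]))                    # empty chunk: returned untouched
print(output_chunk([{"qseq_offset": 1}]))  # normal chunk: cast as before
```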
Oh, that is a funny edge case indeed! I have excluded this proteome from my analysis for now, and will wait for the fix. Thanks for that in advance!
Botond
| 2024-03-28T16:21:23 | 0.0 | [] | []
||
MTgeophysics/mtpy | MTgeophysics__mtpy-138 | 155fe31e5971df2894772e6059fc75f3e8aac33e | diff --git a/mtpy/modeling/occam2d.py b/mtpy/modeling/occam2d.py
index 6fde69533..7cef20f90 100644
--- a/mtpy/modeling/occam2d.py
+++ b/mtpy/modeling/occam2d.py
@@ -2464,11 +2464,12 @@ def _fill_data(self):
# loop over mt object in edi_list and use a counter starting at 1
# because that is what occam starts at.
for s_index, edi in enumerate(self.edi_list):
-
+ station_freq = edi.Z.freq
+ interp_freq = self.freq[np.where((self.freq >= station_freq.min()) &
+ (self.freq <= station_freq.max()))]
if self.freq_tol is None:
- station_freq = edi.Z.freq
- interp_freq = self.freq[np.where((self.freq >= station_freq.min()) &
- (self.freq <= station_freq.max()))]
+
+
# interpolate data onto given frequency list
z_interp, t_interp = edi.interpolate(interp_freq)
# z_interp._compute_res_phase()
@@ -2484,8 +2485,8 @@ def _fill_data(self):
tipper = None
tipper_err = None
else:
- station_freq = edi.Z.freq
rho = edi.Z.resistivity
+ rho_err = edi.Z.resistivity_err
phi = edi.Z.phase
tipper = edi.Tipper.tipper
tipper_err = edi.Tipper.tipper_err
@@ -2496,10 +2497,14 @@ def _fill_data(self):
for freq_num, frequency in enumerate(self.freq):
if self.freq_tol is not None:
try:
- f_index = np.where((station_freq >= frequency * (1 - self.freq_tol)) &
- (station_freq <= frequency * (1 + self.freq_tol)))[0][0]
+ # list of indices within tolerance
+ f_index_list = np.where((station_freq >= frequency * (1 - self.freq_tol)) &
+ (station_freq <= frequency * (1 + self.freq_tol)))[0]
+ # closest frequency
+ diff = np.abs(station_freq[f_index_list] - frequency)
+ f_index = f_index_list[np.where(diff==np.amin(diff))]
- except IndexError:
+ except ValueError:
f_index = None
else:
# skip, if the listed frequency is not available for the station
| freq_tol function when creating Occam2D/Mare2D data files
In the Occam2D.py - there is an freq_tol variable (a frequency tolerance as a percentage). This freq_tol is there so that if you have masked certain frequencies in your data, you have control over whether the data are interpolated at this frequency (or the given frequencies) or left without data at this frequency. At the moment, I can only produce occam (and subsequently Mare2D) using the develop branch when there is no freq_tol set. In other words, if I've masked data so that there is a gap, when I produce the occam data file the gap is always filled. Perhaps an alternative is interpolate_freq, but I'm not sure that the interpolate_freq (true,false) is still working in the latest version when generating a data file.
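For reference, the patched behaviour keeps a listed frequency only if the station actually has one within the tolerance, and then takes the closest of those candidates; a small NumPy sketch of that selection (the array values are made up):
```python
import numpy as np

station_freq = np.array([10.0, 31.0, 100.0])   # frequencies present at the station
frequency = 30.0                                # frequency requested in the data file
freq_tol = 0.1                                  # 10 % tolerance

candidates = np.where((station_freq >= frequency * (1 - freq_tol)) &
                      (station_freq <= frequency * (1 + freq_tol)))[0]
if candidates.size:
    diff = np.abs(station_freq[candidates] - frequency)
    f_index = candidates[np.argmin(diff)]       # closest match within tolerance
else:
    f_index = None                              # masked frequency stays a gap
print(f_index)
```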
| 2021-03-02T23:33:52 | 0.0 | [] | [] |
|||
DIRACGrid/WebAppDIRAC | DIRACGrid__WebAppDIRAC-664 | d70c268ff47fc051ca29fd0415fcaeb0e7587f2a | diff --git a/src/WebAppDIRAC/WebApp/handler/SpaceOccupancyHandler.py b/src/WebAppDIRAC/WebApp/handler/SpaceOccupancyHandler.py
index 37f63d5ea..99f4998cb 100644
--- a/src/WebAppDIRAC/WebApp/handler/SpaceOccupancyHandler.py
+++ b/src/WebAppDIRAC/WebApp/handler/SpaceOccupancyHandler.py
@@ -3,52 +3,45 @@
import json
-from WebAppDIRAC.Lib.WebHandler import WebHandler, WErr, asyncGen
from DIRAC import gLogger
from DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import ResourceManagementClient
+from WebAppDIRAC.Lib.WebHandler import _WebHandler as WebHandler, WErr
class SpaceOccupancyHandler(WebHandler):
- AUTH_PROPS = "authenticated"
+ DEFAULT_AUTHORIZATION = "authenticated"
- @asyncGen
- def web_getSelectionData(self):
+ def initializeRequest(self):
+ self.rmc = ResourceManagementClient()
+
+ def web_getSelectionData(self, **kwargs):
callback = {
"StorageElement": set(),
}
- rmc = ResourceManagementClient()
-
- gLogger.info("Arguments to web_getSelectionData", repr(self.request.arguments))
-
- spaces = yield self.threadTask(rmc.selectSpaceTokenOccupancyCache)
+ gLogger.info("Arguments to web_getSelectionData", kwargs)
- if spaces["OK"]:
- for sp in spaces["Value"]:
- callback["StorageElement"].add(sp[1])
+ if (result := self.rmc.selectSpaceTokenOccupancyCache())["OK"]:
+ for space in result["Value"]:
+ callback["StorageElement"].add(space[1])
for key, value in callback.items():
callback[key] = [[item] for item in list(value)]
# callback[key].sort()
callback[key] = [["All"]] + callback[key]
- self.finish(callback)
-
- @asyncGen
- def web_getSpaceOccupancyData(self):
-
- rmc = ResourceManagementClient()
-
- se = json.loads(self.get_argument("StorageElement", "null"))
+ return callback
- res = yield self.threadTask(rmc.selectSpaceTokenOccupancyCache, None, list(se) if se else se)
+ def web_getSpaceOccupancyData(self, StorageElement="null"):
+ se = json.loads(StorageElement)
- if not res["OK"]:
- raise WErr.fromSERROR(res)
+ result = self.rmc.selectSpaceTokenOccupancyCache(None, list(se) if se else se)
+ if not result["OK"]:
+ raise WErr.fromSERROR(result)
resList = []
- for sp in res["Value"]:
+ for sp in result["Value"]:
# sp is e.g. ['dips://lbtestvobox.cern.ch:9196/',
# 'CertificationSandboxSE',
# 0.0,
@@ -82,4 +75,4 @@ def web_getSpaceOccupancyData(self):
resList.append(spRes)
- self.finish({"success": "true", "result": resList, "total": len(res["Value"])})
+ self.finish({"success": "true", "result": resList, "total": len(result["Value"])})
| Replace WebHandler with _WebHandler
Apologies if I haven't found where this was discussed, but it seems that we currently have two implementations of the `WebHandler`:
https://github.com/DIRACGrid/WebAppDIRAC/blob/d70c268ff47fc051ca29fd0415fcaeb0e7587f2a/src/WebAppDIRAC/Lib/WebHandler.py#L161
https://github.com/DIRACGrid/WebAppDIRAC/blob/d70c268ff47fc051ca29fd0415fcaeb0e7587f2a/src/WebAppDIRAC/Lib/WebHandler.py#L401
Most classes seem to be using `_WebHandler` which appears to be a much nicer implementation which delegates every request to a dedicated thread so the `@asyncGen` and ` yield self.threadTask` stuff isn't needed like in:
https://github.com/DIRACGrid/WebAppDIRAC/blob/d70c268ff47fc051ca29fd0415fcaeb0e7587f2a/src/WebAppDIRAC/WebApp/handler/SystemAdministrationHandler.py#L91-L92
https://github.com/DIRACGrid/WebAppDIRAC/blob/d70c268ff47fc051ca29fd0415fcaeb0e7587f2a/src/WebAppDIRAC/WebApp/handler/SystemAdministrationHandler.py#L104
Is there a reason why we need both implementations? I think we should just move the legacy style handlers to the new one and delete the old implementation.
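To make the difference concrete, a rough before/after sketch of what moving a handler to the `_WebHandler` style looks like, condensed from the SpaceOccupancyHandler diff above (not a drop-in file; details trimmed):
```python
# Before: legacy WebHandler with explicit async plumbing.
# from WebAppDIRAC.Lib.WebHandler import WebHandler, WErr, asyncGen
#
# class SpaceOccupancyHandler(WebHandler):
#     AUTH_PROPS = "authenticated"
#
#     @asyncGen
#     def web_getSelectionData(self):
#         rmc = ResourceManagementClient()
#         result = yield self.threadTask(rmc.selectSpaceTokenOccupancyCache)
#         self.finish(result)

# After: _WebHandler runs each request in its own thread, so plain
# blocking calls and return values are enough.
from DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import ResourceManagementClient
from WebAppDIRAC.Lib.WebHandler import _WebHandler as WebHandler


class SpaceOccupancyHandler(WebHandler):
    DEFAULT_AUTHORIZATION = "authenticated"

    def initializeRequest(self):
        self.rmc = ResourceManagementClient()

    def web_getSelectionData(self, **kwargs):
        return self.rmc.selectSpaceTokenOccupancyCache()
```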
| Yes, you are absolutely right, now the process of switching to `_WebHandler` is underway, as soon as it ends `WebHandler` is simply removed and `_WebHandler` will be renamed to `WebHandler`.
I think it would be good to deal with this before releasing v5.0 so we have a cleaner starting point for v5.1 (which I have some ideas for).
Quickly checking these are the modules which need to be updated in vanilla DIRAC. I'll try to start working through them now starting from the top and checking them off:
- [x] `src/WebAppDIRAC/WebApp/handler/ConfigurationManagerHandler.py`
- [x] `src/WebAppDIRAC/WebApp/handler/ProxyManagerHandler.py`
- [x] `src/WebAppDIRAC/WebApp/handler/ProxyUploadHandler.py`
- [x] `src/WebAppDIRAC/WebApp/handler/PublicStateManagerHandler.py`
- [x] `src/WebAppDIRAC/WebApp/handler/RegistryManagerHandler.py`
- [x] `src/WebAppDIRAC/WebApp/handler/RequestMonitorHandler.py`
- [x] `src/WebAppDIRAC/WebApp/handler/ResourceSummaryHandler.py`
- [x] `src/WebAppDIRAC/WebApp/handler/SiteSummaryHandler.py`
- [x] `src/WebAppDIRAC/WebApp/handler/SpaceOccupancyHandler.py`
- [x] `src/WebAppDIRAC/WebApp/handler/SystemAdministrationHandler.py`
- [x] `src/WebAppDIRAC/WebApp/handler/TransformationMonitorHandler.py`
- [x] `src/WebAppDIRAC/WebApp/handler/VMDiracHandler.py`
| 2022-06-18T15:28:29 | 0.0 | [] | [] |
||
oleneyl/maplestory_dpm_calc | oleneyl__maplestory_dpm_calc-726 | cdbafdefc057b8f4fca5afe49b4f684bd7cd8824 | diff --git a/dpmModule/jobs/kain.py b/dpmModule/jobs/kain.py
index f08a4cea7..9fceb8747 100644
--- a/dpmModule/jobs/kain.py
+++ b/dpmModule/jobs/kain.py
@@ -845,7 +845,7 @@ def generate(self, vEhc, chtr: ck.AbstractCharacter, options: Dict[str, Any]):
ChasingShot = (
core.DamageSkill(
name="ì²´ì´ì± ì·",
- delay=960, # base delay 960, AS not applied
+ delay=840, # base delay 960, AS from buff not applied
damage=320,
hit=6 * 3,
cooltime=30000,
@@ -857,7 +857,7 @@ def generate(self, vEhc, chtr: ck.AbstractCharacter, options: Dict[str, Any]):
SneakySniping = (
core.DamageSkill(
name="ì¤ëí¤ ì¤ëì´í",
- delay=420 + 270, # prepare.action + keydownend.action, need more check
+ delay=60 + 270, # 60 + keydownend.action
damage=175,
hit=10 * 5, # 10í, 5í ë°ë³µ
cooltime=40000,
@@ -869,7 +869,7 @@ def generate(self, vEhc, chtr: ck.AbstractCharacter, options: Dict[str, Any]):
SneakySnipingRelease = (
core.DamageSkill(
name="[ë°í/ì²í] ì¤ëí¤ ì¤ëì´í",
- delay=420 + 270, # prepare.time + keydownend.action
+ delay=60 + 270, # 60 + keydownend.action
damage=200,
hit=12 * 5, # 12í, 5í ë°ë³µ
cooltime=60000,
@@ -1100,7 +1100,7 @@ def generate(self, vEhc, chtr: ck.AbstractCharacter, options: Dict[str, Any]):
StrikeArrowRelease,
]
+ [PoisonNeedle, ChainSickle, TearingKnife, PhantomBlade]
- + [ChasingShot, SneakySniping, ShaftBreak, ScatteringShot, FallingDust]
+ + [ChasingShot, SneakySniping, FallingDust, ScatteringShot, ShaftBreak]
+ [
RemainIncense,
DeathBlessingBonus,
| Kain skill delay information
1. Chasing Shot
The action delay measures 840 ms whether or not Booster is active. Weapon attack speed does seem to be applied, just not the attack speed from buffs.
2. Sneaky Sniping
With a short tap, I estimate a minimum activation delay of 60 ms plus a 270 ms finishing hit, about 330 ms in total.
Keydown-linked skills, when tapped briefly, normally register as a minimum of 30 ms plus an attack-speed-scaled finishing delay, and I have measured them that way before. For this skill, however, no matter how I vary the conditions while recording, the numbers never converge on a consistent value, so an exact confirmation is difficult... For now I attach some of the footage I recorded and my timing log.
[Video footage.zip](https://docs.google.com/uc?export=download&id=1U09YZ4gEhyi2VO1ZLh1s4evO_SxsERBP)
[Timing log.xlsx](https://github.com/oleneyl/maplestory_dpm_calc/files/5799865/default.xlsx)
| 2021-01-12T09:18:04 | 0.0 | [] | [] |
|||
gabrieldemarmiesse/python-on-whales | gabrieldemarmiesse__python-on-whales-278 | 00f093b310cbef0475f0e6c8dde6f64a13b10bdf | diff --git a/python_on_whales/components/compose/cli_wrapper.py b/python_on_whales/components/compose/cli_wrapper.py
index ace904b0..a5452b14 100644
--- a/python_on_whales/components/compose/cli_wrapper.py
+++ b/python_on_whales/components/compose/cli_wrapper.py
@@ -183,10 +183,15 @@ def push(self, services: List[str] = []):
full_cmd += services
run(full_cmd)
- def restart(self, timeout: Union[int, timedelta, None]):
- """Restart all containers
+ def restart(
+ self,
+ services: Union[str, List[str]] = [],
+ timeout: Union[int, timedelta, None] = None,
+ ):
+ """Restart containers
# Arguments
+ services: The names of one or more services to restart (str or list of str)
timeout: The shutdown timeout (`int` are interpreted as seconds).
`None` means the CLI default value (10s).
See [the docker stop docs](https://docs.docker.com/engine/reference/commandline/stop/)
@@ -198,6 +203,7 @@ def restart(self, timeout: Union[int, timedelta, None]):
timeout = int(timeout.total_seconds())
full_cmd.add_simple_arg("--timeout", timeout)
+ full_cmd += to_list(services)
run(full_cmd)
def rm(
| docker compose restart
Hello,
another command that I would like to use (yes I know... my list of commands is pretty long :stuck_out_tongue: )
In the docs it is reported as _Not yet implemented_ but I found it mostly completed
I think that the only missing things are:
- the services parameter
- timeout should be defaulted to None
Here the corresponding commit on my fork
https://github.com/mdantonio/python-on-whales/commit/7bc1168dd8b2aaca51a67d5f6d1b677bc8e52d48
Can I ask you to consider the fixes? Also let me know if you prefer a PR and if I have to think about some tests
Thank you, as usual!
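For what it's worth, with the signature introduced in the patch above the call would look roughly like this (the service names are made up):
```python
from python_on_whales import docker

# Restart only selected services, with an explicit shutdown timeout.
docker.compose.restart(services=["web", "worker"], timeout=10)

# Or restart everything, keeping the CLI's default 10s timeout.
docker.compose.restart()
```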
| You're in luck, it's already implemented in master. I'll make a release today so that you can use it :)
Wow, great!
Hello,
maybe a little misunderstanding: the released version in v0.30.0 is exactly the implementation I was referring to... so I fear that the issue should be reopened
The timeout parameter is mandatory but it should not, so I think that you should default the parameter to None
The implementation only allows to restart ALL the services. Well, the help from the docker compose restart command is not clear and does not mention the services but it accepts a list of services like any other command, so I think that you should add the services parameter.
I tested both the fixes in the commit that I linked yesterday, could you consider to integrate such fixes?
Sure thing, I'll fix it :)
If you're willing to and you have the time, I would be very grateful for pull requests as it's much faster for me to review than to write the pull request myself. It helps when I have busy weeks with little free time :) | 2021-11-05T16:19:23 | 0.0 | [] | [] |
||
mindsdb/mindsdb | mindsdb__mindsdb-10062 | de85a16aa9b26eb4ca425cbbda37b6fe47bb2bec | diff --git a/mindsdb/integrations/handlers/derby_handler/derby_handler.py b/mindsdb/integrations/handlers/derby_handler/derby_handler.py
index 4f854078319..602b90cc4a0 100644
--- a/mindsdb/integrations/handlers/derby_handler/derby_handler.py
+++ b/mindsdb/integrations/handlers/derby_handler/derby_handler.py
@@ -168,6 +168,9 @@ def query(self, query: ASTNode) -> StatusResponse:
else:
query_str = str(query)
+ # Replace backticks with double quotes for Derby compatibility
+ query_str = query_str.replace("`", '"')
+
return self.native_query(query_str)
| [Bug]: Unable select specific columns in Apache Derby
### Short description of current behavior
Follow up: #10031
### All columns selection (works fine ✅)

### Specific column selection (Not fine ❌)

### Video or screenshots
_No response_
### Expected behavior
_No response_
### How to reproduce the error
_No response_
### Anything else?
_No response_
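Based on the patch above, the underlying cause appears to be identifier quoting: the rendered query uses MySQL-style backticks, which Derby rejects, so the handler rewrites them to standard double quotes. A tiny illustration of that rewrite (the query text is made up):
```python
query_str = 'SELECT `name`, `age` FROM `house_sales` LIMIT 10'

# Derby only understands ANSI double-quoted identifiers.
query_str = query_str.replace("`", '"')
print(query_str)  # SELECT "name", "age" FROM "house_sales" LIMIT 10
```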
| 2024-10-28T11:29:38 | 0.0 | [] | [] |
|||
metricq/metricq-tools | metricq__metricq-tools-13 | d59182c997e29f97e611b33ed8ab286e5eebd8e1 | diff --git a/README.md b/README.md
index 5a07334..8fadd42 100644
--- a/README.md
+++ b/README.md
@@ -7,10 +7,103 @@ MetricQ Tools
[](https://pypi.org/project/metricq-tools/)

-Tools and utility scripts to monitor and administrate a MetricQ network.
+Tools and utility scripts to utilize, monitor, and administrate a MetricQ network.
-This repository includes a Python package that installs the following
-executables:
+
+Common command line options
+---------------------------
+
+```
+ --server URL MetricQ server URL. [required]
+ --token CLIENT_TOKEN A token to identify this client on the MetricQ
+ network. [default: depends on the tool]
+ -v, --verbosity LVL Either CRITICAL, ERROR, WARNING, INFO or DEBUG
+ --version Show the version and exit.
+ --help Show this message and exit.
+```
+
+All options for these tools can be passed as environment variables prefixed with `METRICQ_`,
+e.g., `METRICQ_SERVER=amqps://...`.
+You can also create a `.metricq` file in the current or home directory that contains environment variable settings in the same format.
+Some options, including server and token, can contain placeholders for `$USER` and `$HOST`.
+
+
+User tools
+----------
+
+`metricq-energy`
+----------------
+
+Run a command and calculate the energy consumption of a metric during this execution.
+
+```
+Usage: metricq-energy [OPTIONS] COMMAND...
+
+ Get a single energy value for a metric during the execution of the given
+ command. This value is just the integral of the metric over the time the
+ command was running.
+
+ The integral is calculated as the product of the arithmetic mean of all
+ values times the runtime of the given command. For the result to be
+ accurate, the metric should have updates in regular intervals and there
+ should be a sufficient number of values for the duration of the command.
+
+Options:
+ -m, --metric TEXT [required]
+ --expires INTEGER Queue expiration time in seconds. Set this value to
+ the maximum time the command is expected to run.
+```
+
+`metricq-slurm`
+---------------
+
+Get the energy consumption for SLURM jobs.
+
+```
+Usage: metricq-slurm [OPTIONS]
+
+ Get an energy value for a slurm job given its job id.
+
+ This only works for exclusive jobs.
+
+Options:
+ -m, --metric TEXT Pattern for per-metric power consumption. $HOST will
+ be replaced with the host(s) running the job. The
+ metric is assumed to be in W (watts). [required]
+ -j, --jobs TEXT job(.step) or list of job(.steps) as per sacct
+ [required]
+```
+
+`metricq-summary`
+-----------------
+
+Run a command and collect statistics about a given metric during this execution.
+
+```
+Usage: metricq-summary [OPTIONS] COMMAND...
+
+ Live metric data analysis and inspection on the MetricQ network.
+
+ Consumes new data points for the given metric as they are submitted to the
+ network, prints a statistical overview on exit.
+
+Options:
+ -i, --intervals-histogram / -I, --no-intervals-histogram
+ Show an histogram of the observed
+ distribution of durations between data
+ points.
+ -h, --values-histogram / -H, --no-values-histogram
+ Show an histogram of the observed metric
+ values.
+ -d, --print-data-points / -D, --no-print-data-points
+ -s, --print-statistics / -S, --no-print-statistics
+ -m, --metric TEXT [required]
+```
+
+Administrator tools
+-------------------
+
+These tools are intended for debugging and monitoring MetricQ networks.
`metricq-check`
---------------
@@ -21,11 +114,6 @@ Uses the aggregation of persisted metric values to quickly check, if it contains
Usage: metricq-check [OPTIONS]
Check metrics for non-finite values.
-
-Options:
- -v, --verbosity LVL Either CRITICAL, ERROR, WARNING, INFO or DEBUG
- --server URL MetricQ server URL. [default: amqp://localhost/]
- --help Show this message and exit.
```
`metricq-discover`
@@ -40,17 +128,12 @@ Usage: metricq-discover [OPTIONS]
clients.
Options:
- --version Show the version and exit.
- --server URL MetricQ server URL. [default:
- amqp://localhost/]
-d, --diff JSON_FILE Show a diff to a list of previously discovered
clients (produced with --format=json)
-t, --timeout DURATION Wait at most this long for replies.
--format (pretty|json) Print results in this format [default:
(pretty)]
--ignore (error-responses) Messages to ignore.
- -v, --verbosity LVL Either CRITICAL, ERROR, WARNING, INFO or DEBUG
- --help Show this message and exit.
```
`metricq-inspect`
@@ -67,10 +150,6 @@ Usage: metricq-inspect [OPTIONS] METRIC
network, prints a statistical overview on exit.
Options:
- --server URL MetricQ server URL. [default:
- amqp://localhost/]
- --token CLIENT_TOKEN A token to identify this client on the
- MetricQ network. [default: metricq-inspect]
-i, --intervals-histogram / -I, --no-intervals-histogram
Show an histogram of the observed
distribution of durations between data
@@ -82,10 +161,6 @@ Options:
Show an histogram of the observed chunk
sizes of all messages received.
-d, --print-data-points / -D, --no-print-data-points
- -v, --verbosity LVL Either CRITICAL, ERROR, WARNING, INFO or
- DEBUG
- --version Show the version and exit.
- --help Show this message and exit.
```
`metricq-send`
@@ -99,13 +174,7 @@ Usage: metricq-send [OPTIONS] METRIC VALUE
Send a single time-value pair for the given metric.
Options:
- -v, --verbosity LVL Either CRITICAL, ERROR, WARNING, INFO or DEBUG
- --version Show the version and exit.
- --server URL MetricQ server URL. [default: amqp://localhost/]
- --token CLIENT_TOKEN A token to identify this client on the MetricQ
- network. [default: source-send]
--timestamp TIMESTAMP Timestamp to send. [default: (now)]
- --help Show this message and exit.
```
`metricq-spy`
@@ -119,42 +188,6 @@ Usage: metricq-spy [OPTIONS] METRICS...
Obtain metadata and storage location for a set of metrics.
Options:
- -v, --verbosity LVL Either CRITICAL, ERROR, WARNING, INFO or DEBUG
- --server URL MetricQ server URL. [default: amqp://localhost/]
--format (pretty|json) Print results in this format [default: (pretty)]
- --help Show this message and exit.
```
-`metricq-summary`
------------------
-
-Live metric data analysis and inspection on the MetricQ network.
-
-```
-Usage: metricq-summary [OPTIONS] COMMAND...
-
- Live metric data analysis and inspection on the MetricQ network.
-
- Consumes new data points for the given metric as they are submitted to the
- network, prints a statistical overview on exit.
-
-Options:
- -v, --verbosity LVL Either CRITICAL, ERROR, WARNING, INFO or
- DEBUG
- --server URL MetricQ server URL. [default:
- amqp://localhost/]
- --token CLIENT_TOKEN A token to identify this client on the
- MetricQ network. [default: metricq-summary]
- -i, --intervals-histogram / -I, --no-intervals-histogram
- Show an histogram of the observed
- distribution of durations between data
- points.
- -h, --values-histogram / -H, --no-values-histogram
- Show an histogram of the observed metric
- values.
- -d, --print-data-points / -D, --no-print-data-points
- -s, --print-statistics / -S, --no-print-statistics
- -m, --metric TEXT [required]
- --version Show the version and exit.
- --help Show this message and exit.
-```
diff --git a/metricq_tools/check.py b/metricq_tools/check.py
index 89982ac..754b0eb 100644
--- a/metricq_tools/check.py
+++ b/metricq_tools/check.py
@@ -57,7 +57,3 @@ def main(server: str, token: str) -> None:
)
asyncio.run(check_for_non_finite(client))
-
-
-if __name__ == "__main__":
- main()
diff --git a/metricq_tools/discover.py b/metricq_tools/discover.py
index fe5b8c4..3bf641c 100755
--- a/metricq_tools/discover.py
+++ b/metricq_tools/discover.py
@@ -351,7 +351,3 @@ def main(
ignored_events=set(event for event in ignore),
)
)
-
-
-if __name__ == "__main__":
- main()
diff --git a/metricq_tools/inspect.py b/metricq_tools/inspect.py
index decd431..4b23cde 100755
--- a/metricq_tools/inspect.py
+++ b/metricq_tools/inspect.py
@@ -229,7 +229,3 @@ def main(
print_data=print_data_points,
)
sink.run()
-
-
-if __name__ == "__main__":
- main()
diff --git a/metricq_tools/send.py b/metricq_tools/send.py
index 0335d2a..989d27f 100755
--- a/metricq_tools/send.py
+++ b/metricq_tools/send.py
@@ -44,7 +44,3 @@ def main(
)
send.run()
-
-
-if __name__ == "__main__":
- main()
diff --git a/metricq_tools/slurm.py b/metricq_tools/slurm.py
new file mode 100644
index 0000000..ed3fa7d
--- /dev/null
+++ b/metricq_tools/slurm.py
@@ -0,0 +1,184 @@
+import asyncio
+import datetime
+import math
+from dataclasses import dataclass
+from string import Template
+from typing import Optional
+
+import click
+import metricq
+from hostlist import expand_hostlist # type: ignore
+from tabulate import tabulate
+
+from .logging import logger
+from .utils import metricq_command
+from .version import version as client_version
+
+
+def _parse_slurm_timestamp(timestamp: str) -> Optional[metricq.Timestamp]:
+ if timestamp == "":
+ return None
+ return metricq.Timestamp.from_local_datetime(
+ datetime.datetime.fromisoformat(timestamp)
+ )
+
+
+def _check_energy(aggregate: metricq.TimeAggregate) -> float:
+ if aggregate.count == 0:
+ logger.error("No data points for energy computation.")
+ return math.nan
+ if aggregate.count < 10:
+ logger.warning(
+ "Few data points {}, likely due to short job duration. Energy may be inaccurate.",
+ aggregate.count,
+ )
+ if aggregate.minimum < 0:
+ logger.warning(
+ "Minimum power {} is negative, energy may be incorrect.",
+ aggregate.minimum,
+ )
+ return aggregate.integral_s
+
+
+@dataclass()
+class SlurmJobEntry:
+ def __init__(self, row: str):
+ (
+ self.job_id,
+ self.job_name,
+ start_str,
+ end_str,
+ hostlist_str,
+ ) = row.split("|")
+ self.start = _parse_slurm_timestamp(start_str)
+ self.end = _parse_slurm_timestamp(end_str)
+ if hostlist_str in ["", "None assigned"]:
+ self.hostlist = []
+ else:
+ self.hostlist = expand_hostlist(hostlist_str)
+
+ job_id: str
+ job_name: str
+ start: Optional[metricq.Timestamp]
+ end: Optional[metricq.Timestamp]
+ hostlist: list[str]
+ energy: float = math.nan
+
+ @property
+ def energy_str(self) -> str:
+ if math.isnan(self.energy):
+ return "N/A"
+ return f"{self.energy:.1f}"
+
+ async def collect_energy(
+ self, client: metricq.HistoryClient, metric_template: Template
+ ) -> None:
+ if self.job_id.endswith(".extern") or self.job_id.endswith(".batch"):
+ return
+ if not self.hostlist:
+ logger.warning(
+ "Job {} has no hostlist, cannot compute energy.", self.job_id
+ )
+ return
+ if self.start is None or self.end is None:
+ logger.warning(
+ "Job {} has not finished yet, cannot compute energy.", self.job_id
+ )
+ return
+
+ results = await asyncio.gather(
+ *[
+ client.history_aggregate(
+ metric=metric_template.substitute({"HOST": host}),
+ start_time=self.start,
+ end_time=self.end,
+ )
+ for host in self.hostlist
+ ]
+ )
+ energy_values = [_check_energy(a) for a in results]
+ self.energy = sum(energy_values)
+
+
+async def get_slurm_data(jobs: str) -> list[SlurmJobEntry]:
+ command = [
+ "sacct",
+ "--format",
+ "JobID,JobName,Start,End,NodeList",
+ "--jobs",
+ jobs,
+ "--noheader",
+ "--parsable2",
+ ]
+ logger.debug("Running command {}", command)
+ proc = await asyncio.create_subprocess_exec(
+ *command, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
+ )
+
+ stdout, stderr = await proc.communicate()
+
+ if stderr:
+ logger.error("SLURM error output: '{}'", stderr.decode())
+
+ if proc.returncode == 0:
+ logger.info("{!r} exited with {}", command, proc.returncode)
+ else:
+ logger.error("{!r} exited with {}", command, proc.returncode)
+
+ return [SlurmJobEntry(line) for line in stdout.decode().splitlines()]
+
+
+async def slurm_energy(
+ client: metricq.HistoryClient, jobs: str, metric_template: Template
+) -> None:
+ jobs_data = await get_slurm_data(jobs)
+ async with client:
+ await asyncio.gather(
+ *[j.collect_energy(client, metric_template) for j in jobs_data]
+ )
+ table_header = ["JobID", "Job Name", "Energy"]
+ table_data = [
+ [j.job_id, j.job_name, j.energy_str]
+ for j in jobs_data
+ if not math.isnan(j.energy)
+ ]
+ print(tabulate(table_data, headers=table_header, disable_numparse=True))
+
+
+@metricq_command(default_token="history-$USER-tool-slurm")
[email protected](
+ "-m",
+ "--metric",
+ type=str,
+ required=True,
+ multiple=False,
+ help=(
+ "Pattern for per-metric power consumption. "
+ "$HOST will be replaced with the host(s) running the job. "
+ "The metric is assumed to be in W (watts)."
+ ),
+)
[email protected](
+ "-j",
+ "--jobs",
+ type=str,
+ required=True,
+ help="job(.step) or list of job(.steps) as per sacct",
+)
+def main(
+ server: str,
+ token: str,
+ metric: str,
+ jobs: str,
+) -> None:
+ """
+ Get an energy value for a slurm job given its job id.
+
+ This only works for exclusive jobs.
+ """
+ client = metricq.HistoryClient(
+ token=token,
+ url=server,
+ client_version=client_version,
+ )
+ asyncio.run(slurm_energy(client, jobs=jobs, metric_template=Template(metric)))
diff --git a/metricq_tools/spy.py b/metricq_tools/spy.py
index 9b89d86..4a35e12 100755
--- a/metricq_tools/spy.py
+++ b/metricq_tools/spy.py
@@ -105,7 +105,3 @@ def main(server: str, token: str, format: OutputFormat, metrics: list[str]) -> N
spy = MetricQSpy(token=token, url=server)
asyncio.run(spy.spy(metrics, output_format=format))
-
-
-if __name__ == "__main__":
- main()
diff --git a/metricq_tools/summary.py b/metricq_tools/summary.py
index fa04f6a..6518b8f 100755
--- a/metricq_tools/summary.py
+++ b/metricq_tools/summary.py
@@ -254,7 +254,3 @@ def main(
)
exit(returncode)
-
-
-if __name__ == "__main__":
- main()
diff --git a/metricq_tools/utils.py b/metricq_tools/utils.py
index 8030f0c..51e51c5 100644
--- a/metricq_tools/utils.py
+++ b/metricq_tools/utils.py
@@ -218,8 +218,7 @@ def metricq_server_option() -> Callable[[FC], FC]:
"--server",
type=TemplateStringParam(),
metavar="URL",
- default="amqp://localhost/",
- show_default=True,
+ required=True,
help="MetricQ server URL.",
)
diff --git a/setup.cfg b/setup.cfg
index c4b5dd8..afe4c2c 100755
--- a/setup.cfg
+++ b/setup.cfg
@@ -24,6 +24,7 @@ install_requires =
humanize~=2.5
python-dateutil~=2.8
python-dotenv~=1.0.0
+ python-hostlist
numpy
termplotlib
tabulate
@@ -36,6 +37,7 @@ console_scripts =
metricq-energy = metricq_tools.energy:main
metricq-inspect = metricq_tools.inspect:main
metricq-send = metricq_tools.send:main
+ metricq-slurm = metricq_tools.slurm:main
metricq-spy = metricq_tools.spy:main
metricq-summary = metricq_tools.summary:main
| Slurm job energy summary
input: slurm job id
output: energy accounting summary
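To illustrate the accounting described in the README changes above (all numbers here are made up): each node's energy is the integral of its power metric over the job's runtime, and the job total is the sum over all allocated nodes.

```
# Hypothetical 2-node job that ran for 600 s with mean node powers of 350 W and 340 W.
runtime_s = 600
mean_power_w = [350.0, 340.0]

# integral_s per node is approximately mean power * duration (in joules).
per_node_energy_j = [p * runtime_s for p in mean_power_w]
total_energy_j = sum(per_node_energy_j)

print(f"{total_energy_j:.0f} J = {total_energy_j / 3.6e6:.3f} kWh")  # 414000 J = 0.115 kWh
```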
| 2023-06-09T10:24:09 | 0.0 | [] | [] |
|||
ambianic/peerjs-python | ambianic__peerjs-python-38 | 6ad423985459f32de226684d8865d18892d9e0b3 | diff --git a/.gitpod.yml b/.gitpod.yml
new file mode 100644
index 0000000..cf829e9
--- /dev/null
+++ b/.gitpod.yml
@@ -0,0 +1,3 @@
+tasks:
+ - init: 'echo "TODO: Replace with init/build command"'
+ command: 'echo "TODO: Replace with command to start project"'
diff --git a/README.md b/README.md
index 0d27421..9c84028 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,5 @@
+[](https://gitpod.io/#https://github.com/ambianic/peerjs-python)
+
# peerjs-python
Python port of [PeerJS](https://github.com/peers) client.
@@ -60,6 +62,55 @@ Initial working prototype completed. PeerJS Python is now able to connect over W
- [ ] >90% code coverage with CI tests.
- [ ] Port media support.
+## Code Examples
+
+A typical p2p session takes these steps:
+1. Establish signaling server session that enables peers to discover each other.
+2. Discover remote peer ID (either via signaling server room affinity or other means)
+3. Request connection to remote peer via signaling server
+4. Connect to remote peer via WebRTC ICE protocol.
+5. Exchange data or media with remote peer over p2p WebRTC connection.
+
+The following code snippet shows the initial part of establishing a signaling server connection.
+
+```
+ options = PeerOptions(
+ host=config['host'],
+ port=config['port'],
+ secure=config['secure'],
+ token=new_token,
+ config=RTCConfiguration(
+ iceServers=[RTCIceServer(**srv) for srv in config['ice_servers']]
+ )
+ )
+ peer = Peer(id=savedPeerId, peer_options=options)
+ await peer.start()
+ log.info('peer activated')
+ _setPnPServiceConnectionHandlers(peer)
+```
+
+Once a signaling server connection is established, a peer can request connection to another peer or listen for requests from a remote peer.
+The example snippet bellow shows the latter:
+
+```
+ @peer.on(PeerEventType.Connection)
+ async def peer_connection(peerConnection):
+ log.info('Remote peer trying to establish connection')
+ _setPeerConnectionHandlers(peerConnection)
+```
+
+After a p2p connection is established, a peer can receive and send application messages. The following snippet shows how a peer receives a message:
+
+```
+ @peerConnection.on(ConnectionEventType.Data)
+ async def pc_data(data):
+ log.debug('data received from remote peer \n%r', data)
+```
+
+For a complete working example see [this file](https://github.com/ambianic/peerjs-python/blob/master/src/peerjs/ext/http-proxy.py).
+
+
+
## Other Related Open Source projects
There are several great projects that solve the problem of accessing IoT devices behind firewall via tunneling servers.
diff --git a/src/peerjs/ext/http-proxy.py b/src/peerjs/ext/http-proxy.py
index c0e0006..ccaf845 100644
--- a/src/peerjs/ext/http-proxy.py
+++ b/src/peerjs/ext/http-proxy.py
@@ -162,9 +162,6 @@ async def peer_disconnected(peerId):
'Resetting to last known ID.')
peer._id = savedPeerId
peer._lastServerId = savedPeerId
- global _is_shutting_down
- if not _is_shutting_down:
- await peer.reconnect()
@peer.on(PeerEventType.Close)
def peer_close():
| Intermittent errors connecting to signaling server
```
ambianic-edge | 2020-05-21 23:30:06 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.make_discoverable(274): Peer destroyed. Will create a new peer.
ambianic-edge | 2020-05-21 23:30:06 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pnp_service_connect(239): creating peer
ambianic-edge | 2020-05-21 23:30:06 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pnp_service_connect(244): last saved savedPeerId c559151b-eb8d-468a-91b0-7db3baf25dd2
ambianic-edge | 2020-05-21 23:30:06 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pnp_service_connect(246): Peer session token 9a515af4-8c7d-4a09-80af-b188e8770407
ambianic-edge | 2020-05-21 23:30:06 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pnp_service_connect(256): pnpService: peer created with id c559151b-eb8d-468a-91b0-7db3baf25dd2 , options: PeerOptions(host='ambianic-pnp.herokuapp.com', port=443, path='/', key='peerjs', token='9a515af4-8c7d-4a09-80af-b188e8770407', config=RTCConfiguration(iceServers=[RTCIceServer(urls=['stun:stun1.l.google.com:19302', 'stun:stun2.l.google.com:19302', 'stun:stun3.l.google.com:19302', 'stun:stun4.l.google.com:19302', 'stun:stun.l.google.com:19302', 'stun:stun.services.mozilla.com:3478', 'stun:stunserver.org:3478'], username=None, credential=None, credentialType='password'), RTCIceServer(urls=['turn:numb.viagenie.ca'], username='[email protected]', credential='muazkh', credentialType='password'), RTCIceServer(urls=['turn:192.158.29.39:3478?transport=udp'], username='28224511:1379330808', credential='JZEOEt2V3Qb0y27GRntt2u2PAYA=', credentialType='password'), RTCIceServer(urls=['turn:192.158.29.39:3478?transport=tcp'], username='28224511:1379330808', credential='JZEOEt2V3Qb0y27GRntt2u2PAYA=', credentialType='password'), RTCIceServer(urls=['turn:turn.bistri.com:80'], username='homeo', credential='homeo', credentialType='password'), RTCIceServer(urls=['turn:turn.anyfirewall.com:443?transport=tcp'], username='webrtc', credential='webrtc', credentialType='password'), RTCIceServer(urls=['turn:0.peerjs.com:3478'], username='peerjs', credential='peerjsp', credentialType='password')]), secure=True, pingInterval=5)
ambianic-edge | 2020-05-21 23:30:06 INFO /usr/local/lib/python3.7/dist-packages/peerjs/peer.py.start(118): Peer started with UUID: c559151b-eb8d-468a-91b0-7db3baf25dd2
ambianic-edge | 2020-05-21 23:30:06 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pnp_service_connect(258): peer activated
ambianic-edge | 2020-05-21 23:30:06 ERROR /usr/local/lib/python3.7/dist-packages/peerjs/peer.py._abort(454): Aborting!
ambianic-edge | PeerErrorType: PeerErrorType.UnavailableID
ambianic-edge | Error message: ID "$c559151b-eb8d-468a-91b0-7db3baf25dd2" is taken
ambianic-edge | File "/usr/lib/python3.7/runpy.py", line 193, in _run_module_as_main
ambianic-edge | "__main__", mod_spec)
ambianic-edge | File "/usr/lib/python3.7/runpy.py", line 85, in _run_code
ambianic-edge | exec(code, run_globals)
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py", line 362, in <module>
ambianic-edge | loop.run_until_complete(_start())
ambianic-edge | File "/usr/lib/python3.7/asyncio/base_events.py", line 571, in run_until_complete
ambianic-edge | self.run_forever()
ambianic-edge | File "/usr/lib/python3.7/asyncio/base_events.py", line 539, in run_forever
ambianic-edge | self._run_once()
ambianic-edge | File "/usr/lib/python3.7/asyncio/base_events.py", line 1775, in _run_once
ambianic-edge | handle._run()
ambianic-edge | File "/usr/lib/python3.7/asyncio/events.py", line 88, in _run
ambianic-edge | self._context.run(self._callback, *self._args)
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/peer.py", line 231, in _on_server_idtaken
ambianic-edge | f'ID "${self.id}" is taken')
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/peer.py", line 455, in _abort
ambianic-edge | traceback.print_stack()
ambianic-edge | 2020-05-21 23:30:06 WARNING /usr/local/lib/python3.7/dist-packages/peerjs/peer.py.emitError(464): Connection error: ID "$c559151b-eb8d-468a-91b0-7db3baf25dd2" is taken
ambianic-edge | 2020-05-21 23:30:06 WARNING /usr/local/lib/python3.7/dist-packages/peerjs/peer.py.emitError(469): Connection error:
ambianic-edge | ID "$c559151b-eb8d-468a-91b0-7db3baf25dd2" is taken
ambianic-edge | 2020-05-21 23:30:06 ERROR /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.peer_error(128): Peer error ID "$c559151b-eb8d-468a-91b0-7db3baf25dd2" is taken
ambianic-edge | NoneType: None
ambianic-edge | 2020-05-21 23:30:06 WARNING /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.peer_error(129): peerConnectionStatus None
ambianic-edge | 2020-05-21 23:30:06 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.peer_close(124): Peer connection closed
ambianic-edge | 2020-05-21 23:30:06 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.peer_disconnected(110): Peer c559151b-eb8d-468a-91b0-7db3baf25dd2 disconnected from server.
ambianic-edge | 2020-05-21 23:30:06 ERROR /usr/lib/python3.7/asyncio/base_events.py.default_exception_handler(1608): Exception in callback AsyncIOEventEmitter._emit_run.<locals>._callback(<Task finishe... destroyed.')>) at /usr/local/lib/python3.7/dist-packages/pyee/_asyncio.py:55
ambianic-edge | handle: <Handle AsyncIOEventEmitter._emit_run.<locals>._callback(<Task finishe... destroyed.')>) at /usr/local/lib/python3.7/dist-packages/pyee/_asyncio.py:55>
ambianic-edge | Traceback (most recent call last):
ambianic-edge | File "/usr/lib/python3.7/asyncio/events.py", line 88, in _run
ambianic-edge | self._context.run(self._callback, *self._args)
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/pyee/_asyncio.py", line 62, in _callback
ambianic-edge | self.emit('error', exc)
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/pyee/_base.py", line 111, in emit
ambianic-edge | self._emit_handle_potential_error(event, args[0] if args else None)
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/pyee/_base.py", line 83, in _emit_handle_potential_error
ambianic-edge | raise error
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py", line 119, in peer_disconnected
ambianic-edge | await peer.reconnect()
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/peer.py", line 537, in reconnect
ambianic-edge | raise RuntimeError("This peer cannot reconnect to the server. "
ambianic-edge | RuntimeError: This peer cannot reconnect to the server. It has already been destroyed.
^CERROR: Aborting.
```
| ```
clientsIds': ['c559151b-eb8d-468a-91b0-7db3baf25dd2', '7ab48f24-668a-4a21-aec8-beeab802dbad']}
ambianic-edge | 2020-05-20 20:25:40 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.join_peer_room(64): myRoom members {'
clientsIds': ['c559151b-eb8d-468a-91b0-7db3baf25dd2', '7ab48f24-668a-4a21-aec8-beeab802dbad']}
ambianic-edge | 2020-05-20 20:25:50 ERROR /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.make_discoverable(286): Error while
trying to join local peer room. Will retry in a few moments. Error:
ambianic-edge | ConnectionError('Unexpected status code 401 for https://ambianic-pnp.herokuapp.com:443/peerjs/c559151b-eb8d-468a-91b0-7db3baf2
5dd2/dc3ebbab-2ccd-4f0d-94bc-5b9b8cac5943/room/id?ts=2148735.2558159370.10154805004736134')
ambianic-edge | Traceback (most recent call last):
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py", line 277, in make_discoverable
ambianic-edge | await join_peer_room(peer=peer)
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py", line 63, in join_peer_room
ambianic-edge | peerIds = await myRoom.getRoomMembers()
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/peerroom.py", line 74, in getRoomMembers
ambianic-edge | members = await self.join()
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/peerroom.py", line 84, in join
ambianic-edge | self._roomId = await self._getRoomId()
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/peerroom.py", line 48, in _getRoomId
ambianic-edge | result = await self._restCall(rest_method=rest_method)
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/peerroom.py", line 29, in _restCall
ambianic-edge | raise ConnectionError(f'Unexpected status code {status} '
ambianic-edge | ConnectionError: Unexpected status code 401 for https://ambianic-pnp.herokuapp.com:443/peerjs/c559151b-eb8d-468a-91b0-7db3baf2
5dd2/dc3ebbab-2ccd-4f0d-94bc-5b9b8cac5943/room/id?ts=2148735.2558159370.10154805004736134
ambianic-edge | 2020-05-20 20:25:50 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.make_discoverable(290): Peer connecti
on was corrupted. Detroying peer.
ambianic-edge | 2020-05-20 20:25:50 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.peer_disconnected(110): Peer c559151b
-eb8d-468a-91b0-7db3baf25dd2 disconnected from server.
ambianic-edge | 2020-05-20 20:25:50 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pc_close(233): Connection to remote p
eer closed
ambianic-edge | 2020-05-20 20:25:50 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pc_close(233): Connection to remote p
eer closed
ambianic-edge | 2020-05-20 20:25:50 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pc_close(233): Connection to remote p
eer closed
ambianic-edge | 2020-05-20 20:25:50 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pc_close(233): Connection to remote p
eer closed
ambianic-edge | 2020-05-20 20:25:50 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pc_close(233): Connection to remote p
eer closed
ambianic-edge | 2020-05-20 20:25:50 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pc_close(233): Connection to remote p
eer closed
ambianic-edge | 2020-05-20 20:25:50 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pc_close(233): Connection to remote p
eer closed
ambianic-edge | 2020-05-20 20:25:50 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pc_close(233): Connection to remote p
eer closed
ambianic-edge | 2020-05-20 20:25:50 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pc_close(233): Connection to remote p
eer closed
ambianic-edge | 2020-05-20 20:25:50 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pc_close(233): Connection to remote peer closed
ambianic-edge | 2020-05-20 20:25:50 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pc_close(233): Connection to remote p
eer closed
ambianic-edge | 2020-05-20 20:25:50 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pc_close(233): Connection to remote p
eer closed
ambianic-edge | 2020-05-20 20:25:50 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pc_close(233): Connection to remote p
eer closed
ambianic-edge | 2020-05-20 20:25:50 INFO /usr/local/lib/python3.7/dist-packages/aioice/turn.py.delete(153): TURN allocation deleted ('158.69.2
21.198', 52157)
ambianic-edge | 2020-05-20 20:25:50 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.peer_close(124): Peer connection clos
ed
ambianic-edge | 2020-05-20 20:25:50 INFO /usr/local/lib/python3.7/dist-packages/peerjs/peer.py.start(118): Peer started with UUID: c559151b-eb
8d-468a-91b0-7db3baf25dd2
ambianic-edge | 2020-05-20 20:25:53 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.make_discoverable(274): Peer destroye
d. Will create a new peer.
ambianic-edge | 2020-05-20 20:25:53 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pnp_service_connect(239): creating pe
er
ambianic-edge | 2020-05-20 20:25:53 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pnp_service_connect(244): last saved
savedPeerId c559151b-eb8d-468a-91b0-7db3baf25dd2
ambianic-edge | 2020-05-20 20:25:53 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pnp_service_connect(246): Peer sessio
n token c39311f9-dcb6-42ba-81ef-001dcadcce97
ambianic-edge | 2020-05-20 20:25:53 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pnp_service_connect(256): pnpService:
peer created with id c559151b-eb8d-468a-91b0-7db3baf25dd2 , options: PeerOptions(host='ambianic-pnp.herokuapp.com', port=443, path='/', key='peerjs', t
oken='c39311f9-dcb6-42ba-81ef-001dcadcce97', config=RTCConfiguration(iceServers=[RTCIceServer(urls=['stun:stun1.l.google.com:19302', 'stun:stun2.l.googl
e.com:19302', 'stun:stun3.l.google.com:19302', 'stun:stun4.l.google.com:19302', 'stun:stun.l.google.com:19302', 'stun:stun.services.mozilla.com:3478', '
stun:stunserver.org:3478'], username=None, credential=None, credentialType='password'), RTCIceServer(urls=['turn:numb.viagenie.ca'], username='webrtc@li
ve.com', credential='muazkh', credentialType='password'), RTCIceServer(urls=['turn:192.158.29.39:3478?transport=udp'], username='28224511:1379330808', c
redential='JZEOEt2V3Qb0y27GRntt2u2PAYA=', credentialType='password'), RTCIceServer(urls=['turn:192.158.29.39:3478?transport=tcp'], username='28224511:13
79330808', credential='JZEOEt2V3Qb0y27GRntt2u2PAYA=', credentialType='password'), RTCIceServer(urls=['turn:turn.bistri.com:80'], username='homeo', crede
ntial='homeo', credentialType='password'), RTCIceServer(urls=['turn:turn.anyfirewall.com:443?transport=tcp'], username='webrtc', credential='webrtc', cr
edentialType='password'), RTCIceServer(urls=['turn:0.peerjs.com:3478'], username='peerjs', credential='peerjsp', credentialType='password')]), secure=Tr
ue, pingInterval=5)
ambianic-edge | 2020-05-20 20:25:53 INFO /usr/local/lib/python3.7/dist-packages/peerjs/peer.py.start(118): Peer started with UUID: c559151b-eb
8d-468a-91b0-7db3baf25dd2
ambianic-edge | 2020-05-20 20:25:53 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pnp_service_connect(258): peer activa
ted
ambianic-edge | 2020-05-20 20:25:53 ERROR /usr/local/lib/python3.7/dist-packages/peerjs/peer.py._abort(454): Aborting!
ambianic-edge | PeerErrorType: PeerErrorType.UnavailableID
ambianic-edge | Error message: ID "$c559151b-eb8d-468a-91b0-7db3baf25dd2" is taken
ambianic-edge | File "/usr/lib/python3.7/runpy.py", line 193, in _run_module_as_main
ambianic-edge | "__main__", mod_spec)
ambianic-edge | File "/usr/lib/python3.7/runpy.py", line 85, in _run_code
ambianic-edge | exec(code, run_globals)
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py", line 362, in <module>
ambianic-edge | loop.run_until_complete(_start())
ambianic-edge | File "/usr/lib/python3.7/asyncio/base_events.py", line 571, in run_until_complete
ambianic-edge | self.run_forever()
ambianic-edge | File "/usr/lib/python3.7/asyncio/base_events.py", line 539, in run_forever
ambianic-edge | self._run_once()
ambianic-edge | File "/usr/lib/python3.7/asyncio/base_events.py", line 1775, in _run_once
ambianic-edge | handle._run()
ambianic-edge | File "/usr/lib/python3.7/asyncio/events.py", line 88, in _run
ambianic-edge | self._context.run(self._callback, *self._args)
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/peer.py", line 231, in _on_server_idtaken
ambianic-edge | f'ID "${self.id}" is taken')
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/peer.py", line 455, in _abort
ambianic-edge | traceback.print_stack()
ambianic-edge | 2020-05-20 20:25:53 WARNING /usr/local/lib/python3.7/dist-packages/peerjs/peer.py.emitError(464): Connection error: ID "$c5591
51b-eb8d-468a-91b0-7db3baf25dd2" is taken
ambianic-edge | 2020-05-20 20:25:53 WARNING /usr/local/lib/python3.7/dist-packages/peerjs/peer.py.emitError(469): Connection error:
ambianic-edge | ID "$c559151b-eb8d-468a-91b0-7db3baf25dd2" is taken
ambianic-edge | 2020-05-20 20:25:53 ERROR /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.peer_error(128): Peer error ID "$c55
9151b-eb8d-468a-91b0-7db3baf25dd2" is taken
ambianic-edge | NoneType: None
ambianic-edge | 2020-05-20 20:25:53 WARNING /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.peer_error(129): peerConnectionSta
tus None
ambianic-edge | 2020-05-20 20:25:53 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.peer_close(124): Peer connection clos
ed
ambianic-edge | 2020-05-20 20:25:53 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.peer_disconnected(110): Peer c559151b
-eb8d-468a-91b0-7db3baf25dd2 disconnected from server.
ambianic-edge | 2020-05-20 20:25:53 ERROR /usr/lib/python3.7/asyncio/base_events.py.default_exception_handler(1608): Exception in callback Asy
ncIOEventEmitter._emit_run.<locals>._callback(<Task finishe... destroyed.')>) at /usr/local/lib/python3.7/dist-packages/pyee/_asyncio.py:55
```
Looks like at some point the websockets connection to heroku gets corrupted. When PeerJS tries to recover and connect with its known peer ID, the signaling server treats it as a conflicting registration from a different peer instead of a reconnection attempt from the same peer.
One possible solution is to pause a bit longer, giving the signaling server a chance to clean the peer ID out of its realm due to inactivity, and then try to reconnect.
Another option is to make the signaling server smarter: have it check the source of the reconnection request against the original source and, if they are the same, allow the reconnect with the same Peer ID.
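A minimal sketch of the first option (illustrative only; the delay values and the `register_peer` callable are assumptions, not part of the current proxy code): back off before re-registering so the signaling server has time to expire the stale peer ID.

```
import asyncio
import random


async def reconnect_with_backoff(register_peer, max_delay_s: float = 60.0) -> None:
    """Retry peer registration, waiting longer after each failed attempt."""
    delay = 1.0
    while True:
        try:
            await register_peer()  # e.g. create a fresh Peer(...) and await peer.start()
            return
        except Exception:
            # Give the signaling server time to drop the stale peer ID,
            # then retry with exponential backoff plus a little jitter.
            await asyncio.sleep(delay + random.uniform(0, 1))
            delay = min(delay * 2, max_delay_s)
```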
The issue is still present. After a few hours of normal operation, the Python peer eventually goes into this endless loop, unable to regain a connection to the signaling server because its peer ID is still reserved.
```
ambianic-edge | 2020-11-26 03:39:54 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.peer_disconnected(158): Peer 7248f792-abec-4f72-a581-8dacd0308b72 disconnected from server.
ambianic-edge | 2020-11-26 03:39:54 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.peer_close(172): Peer connection closed
ambianic-edge | 2020-11-26 03:39:55 INFO /usr/local/lib/python3.7/dist-packages/peerjs/peer.py.start(118): Peer started with UUID: 7248f792-abec-4f72-a581-8dacd0308b72
ambianic-edge | 2020-11-26 03:39:55 ERROR /usr/local/lib/python3.7/dist-packages/peerjs/peer.py._abort(454): Aborting!
ambianic-edge | PeerErrorType: PeerErrorType.UnavailableID
ambianic-edge | Error message: ID "$7248f792-abec-4f72-a581-8dacd0308b72" is taken
ambianic-edge | File "/usr/lib/python3.7/runpy.py", line 193, in _run_module_as_main
ambianic-edge | "__main__", mod_spec)
ambianic-edge | File "/usr/lib/python3.7/runpy.py", line 85, in _run_code
ambianic-edge | exec(code, run_globals)
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py", line 423, in <module>
ambianic-edge | loop.run_until_complete(_start())
ambianic-edge | File "/usr/lib/python3.7/asyncio/base_events.py", line 571, in run_until_complete
ambianic-edge | self.run_forever()
ambianic-edge | File "/usr/lib/python3.7/asyncio/base_events.py", line 539, in run_forever
ambianic-edge | self._run_once()
ambianic-edge | File "/usr/lib/python3.7/asyncio/base_events.py", line 1775, in _run_once
ambianic-edge | handle._run()
ambianic-edge | File "/usr/lib/python3.7/asyncio/events.py", line 88, in _run
ambianic-edge | self._context.run(self._callback, *self._args)
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/peer.py", line 231, in _on_server_idtaken
ambianic-edge | f'ID "${self.id}" is taken')
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/peer.py", line 455, in _abort
ambianic-edge | traceback.print_stack()
ambianic-edge | 2020-11-26 03:39:55 WARNING /usr/local/lib/python3.7/dist-packages/peerjs/peer.py.emitError(464): Connection error: ID "$7248f792-abec-4f72-a581-8dacd0308b72" is taken
ambianic-edge | 2020-11-26 03:39:55 WARNING /usr/local/lib/python3.7/dist-packages/peerjs/peer.py.emitError(469): Connection error:
ambianic-edge | ID "$7248f792-abec-4f72-a581-8dacd0308b72" is taken
ambianic-edge | 2020-11-26 03:39:55 ERROR /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.peer_error(176): Peer error ID "$7248f792-abec-4f72-a581-8dacd0308b72" is taken
ambianic-edge | NoneType: None
ambianic-edge | 2020-11-26 03:39:55 WARNING /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.peer_error(177): peerConnectionStatus None
ambianic-edge | 2020-11-26 03:39:55 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.peer_disconnected(158): Peer 7248f792-abec-4f72-a581-8dacd0308b72 disconnected from server.
ambianic-edge | 2020-11-26 03:39:55 ERROR /usr/lib/python3.7/asyncio/base_events.py.default_exception_handler(1608): Exception in callback AsyncIOEventEmitter._emit_run.<locals>._callback(<Task finishe... destroyed.')>) at /usr/local/lib/python3.7/dist-packages/pyee/_asyncio.py:55
ambianic-edge | handle: <Handle AsyncIOEventEmitter._emit_run.<locals>._callback(<Task finishe... destroyed.')>) at /usr/local/lib/python3.7/dist-packages/pyee/_asyncio.py:55>
ambianic-edge | Traceback (most recent call last):
ambianic-edge | File "/usr/lib/python3.7/asyncio/events.py", line 88, in _run
ambianic-edge | self._context.run(self._callback, *self._args)
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/pyee/_asyncio.py", line 62, in _callback
ambianic-edge | self.emit('error', exc)
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/pyee/_base.py", line 116, in emit
ambianic-edge | self._emit_handle_potential_error(event, args[0] if args else None)
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/pyee/_base.py", line 86, in _emit_handle_potential_error
ambianic-edge | raise error
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py", line 167, in peer_disconnected
ambianic-edge | await peer.reconnect()
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/peer.py", line 537, in reconnect
ambianic-edge | raise RuntimeError("This peer cannot reconnect to the server. "
ambianic-edge | RuntimeError: This peer cannot reconnect to the server. It has already been destroyed.
ambianic-edge | 2020-11-26 03:39:55 INFO /opt/ambianic-edge/src/ambianic/pipeline/ai/image_detection.py._log_stats(160): Inference time 335.64 ms, 2.15 fps in pipeline area_watch
ambianic-edge | 2020-11-26 03:39:55 INFO /opt/ambianic-edge/src/ambianic/pipeline/ai/image_detection.py._log_stats(160): Inference time 328.18 ms, 2.06 fps in pipeline area_watch
ambianic-edge | 2020-11-26 03:39:56 INFO /opt/ambianic-edge/src/ambianic/pipeline/ai/image_detection.py._log_stats(160): Inference time 328.65 ms, 2.12 fps in pipeline area_watch
ambianic-edge | 2020-11-26 03:39:56 INFO /opt/ambianic-edge/src/ambianic/pipeline/ai/image_detection.py._log_stats(160): Inference time 328.86 ms, 2.09 fps in pipeline area_watch
ambianic-edge | 2020-11-26 03:39:57 INFO /opt/ambianic-edge/src/ambianic/pipeline/ai/image_detection.py._log_stats(160): Inference time 328.79 ms, 2.07 fps in pipeline area_watch
ambianic-edge | 2020-11-26 03:39:57 INFO /opt/ambianic-edge/src/ambianic/pipeline/ai/image_detection.py._log_stats(160): Inference time 328.10 ms, 2.16 fps in pipeline area_watch
ambianic-edge | 2020-11-26 03:39:57 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.make_discoverable(328): Peer destroyed. Will create a new peer.
ambianic-edge | 2020-11-26 03:39:57 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pnp_service_connect(287): creating peer
ambianic-edge | 2020-11-26 03:39:57 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pnp_service_connect(293): last saved savedPeerId 7248f792-abec-4f72-a581-8dacd0308b72
ambianic-edge | 2020-11-26 03:39:57 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pnp_service_connect(295): Peer session token 46013141-92be-4f4a-a304-16294071bab9
ambianic-edge | 2020-11-26 03:39:57 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pnp_service_connect(310): pnpService: peer created with id 7248f792-abec-4f72-a581-8dacd0308b72 , options: PeerOptions(host='ambianic-pnp.herokuapp.com', port=443, path='/', key='peerjs', token='46013141-92be-4f4a-a304-16294071bab9', config=RTCConfiguration(iceServers=[RTCIceServer(urls=['stun:stun.l.google.com:19302'], username=None, credential=None, credentialType='password')]), secure=True, pingInterval=5)
ambianic-edge | 2020-11-26 03:39:58 INFO /usr/local/lib/python3.7/dist-packages/peerjs/peer.py.start(118): Peer started with UUID: 7248f792-abec-4f72-a581-8dacd0308b72
ambianic-edge | 2020-11-26 03:39:58 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pnp_service_connect(312): peer activated
ambianic-edge | 2020-11-26 03:39:58 ERROR /usr/local/lib/python3.7/dist-packages/peerjs/peer.py._abort(454): Aborting!
ambianic-edge | PeerErrorType: PeerErrorType.UnavailableID
ambianic-edge | Error message: ID "$7248f792-abec-4f72-a581-8dacd0308b72" is taken
ambianic-edge | File "/usr/lib/python3.7/runpy.py", line 193, in _run_module_as_main
ambianic-edge | "__main__", mod_spec)
ambianic-edge | File "/usr/lib/python3.7/runpy.py", line 85, in _run_code
ambianic-edge | exec(code, run_globals)
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py", line 423, in <module>
ambianic-edge | loop.run_until_complete(_start())
ambianic-edge | File "/usr/lib/python3.7/asyncio/base_events.py", line 571, in run_until_complete
ambianic-edge | self.run_forever()
ambianic-edge | File "/usr/lib/python3.7/asyncio/base_events.py", line 539, in run_forever
ambianic-edge | self._run_once()
ambianic-edge | File "/usr/lib/python3.7/asyncio/base_events.py", line 1775, in _run_once
ambianic-edge | handle._run()
ambianic-edge | File "/usr/lib/python3.7/asyncio/events.py", line 88, in _run
ambianic-edge | self._context.run(self._callback, *self._args)
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/peer.py", line 231, in _on_server_idtaken
ambianic-edge | f'ID "${self.id}" is taken')
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/peer.py", line 455, in _abort
ambianic-edge | traceback.print_stack()
ambianic-edge | 2020-11-26 03:39:58 WARNING /usr/local/lib/python3.7/dist-packages/peerjs/peer.py.emitError(464): Connection error: ID "$7248f792-abec-4f72-a581-8dacd0308b72" is taken
ambianic-edge | 2020-11-26 03:39:58 WARNING /usr/local/lib/python3.7/dist-packages/peerjs/peer.py.emitError(469): Connection error:
ambianic-edge | ID "$7248f792-abec-4f72-a581-8dacd0308b72" is taken
ambianic-edge | 2020-11-26 03:39:58 ERROR /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.peer_error(176): Peer error ID "$7248f792-abec-4f72-a581-8dacd0308b72" is taken
ambianic-edge | NoneType: None
ambianic-edge | 2020-11-26 03:39:58 WARNING /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.peer_error(177): peerConnectionStatus None
ambianic-edge | 2020-11-26 03:39:58 INFO /opt/ambianic-edge/src/ambianic/pipeline/ai/image_detection.py._log_stats(160): Inference time 331.92 ms, 2.04 fps in pipeline area_watch
ambianic-edge | 2020-11-26 03:39:58 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.peer_disconnected(158): Peer 7248f792-abec-4f72-a581-8dacd0308b72 disconnected from server.
ambianic-edge | 2020-11-26 03:39:58 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.peer_close(172): Peer connection closed
ambianic-edge | 2020-11-26 03:39:58 INFO /usr/local/lib/python3.7/dist-packages/peerjs/peer.py.start(118): Peer started with UUID: 7248f792-abec-4f72-a581-8dacd0308b72
ambianic-edge | 2020-11-26 03:39:58 ERROR /usr/local/lib/python3.7/dist-packages/peerjs/peer.py._abort(454): Aborting!
ambianic-edge | PeerErrorType: PeerErrorType.UnavailableID
ambianic-edge | Error message: ID "$7248f792-abec-4f72-a581-8dacd0308b72" is taken
ambianic-edge | File "/usr/lib/python3.7/runpy.py", line 193, in _run_module_as_main
ambianic-edge | "__main__", mod_spec)
ambianic-edge | File "/usr/lib/python3.7/runpy.py", line 85, in _run_code
ambianic-edge | exec(code, run_globals)
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py", line 423, in <module>
ambianic-edge | loop.run_until_complete(_start())
ambianic-edge | File "/usr/lib/python3.7/asyncio/base_events.py", line 571, in run_until_complete
ambianic-edge | self.run_forever()
ambianic-edge | File "/usr/lib/python3.7/asyncio/base_events.py", line 539, in run_forever
ambianic-edge | self._run_once()
ambianic-edge | File "/usr/lib/python3.7/asyncio/base_events.py", line 1775, in _run_once
ambianic-edge | handle._run()
ambianic-edge | File "/usr/lib/python3.7/asyncio/events.py", line 88, in _run
ambianic-edge | self._context.run(self._callback, *self._args)
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/peer.py", line 231, in _on_server_idtaken
ambianic-edge | f'ID "${self.id}" is taken')
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/peer.py", line 455, in _abort
ambianic-edge | traceback.print_stack()
ambianic-edge | 2020-11-26 03:39:58 WARNING /usr/local/lib/python3.7/dist-packages/peerjs/peer.py.emitError(464): Connection error: ID "$7248f792-abec-4f72-a581-8dacd0308b72" is taken
ambianic-edge | 2020-11-26 03:39:58 WARNING /usr/local/lib/python3.7/dist-packages/peerjs/peer.py.emitError(469): Connection error:
ambianic-edge | ID "$7248f792-abec-4f72-a581-8dacd0308b72" is taken
ambianic-edge | 2020-11-26 03:39:58 ERROR /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.peer_error(176): Peer error ID "$7248f792-abec-4f72-a581-8dacd0308b72" is taken
ambianic-edge | NoneType: None
ambianic-edge | 2020-11-26 03:39:58 WARNING /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.peer_error(177): peerConnectionStatus None
ambianic-edge | 2020-11-26 03:39:58 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.peer_disconnected(158): Peer 7248f792-abec-4f72-a581-8dacd0308b72 disconnected from server.
ambianic-edge | 2020-11-26 03:39:58 ERROR /usr/lib/python3.7/asyncio/base_events.py.default_exception_handler(1608): Exception in callback AsyncIOEventEmitter._emit_run.<locals>._callback(<Task finishe... destroyed.')>) at /usr/local/lib/python3.7/dist-packages/pyee/_asyncio.py:55
ambianic-edge | handle: <Handle AsyncIOEventEmitter._emit_run.<locals>._callback(<Task finishe... destroyed.')>) at /usr/local/lib/python3.7/dist-packages/pyee/_asyncio.py:55>
ambianic-edge | Traceback (most recent call last):
ambianic-edge | File "/usr/lib/python3.7/asyncio/events.py", line 88, in _run
ambianic-edge | self._context.run(self._callback, *self._args)
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/pyee/_asyncio.py", line 62, in _callback
ambianic-edge | self.emit('error', exc)
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/pyee/_base.py", line 116, in emit
ambianic-edge | self._emit_handle_potential_error(event, args[0] if args else None)
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/pyee/_base.py", line 86, in _emit_handle_potential_error
ambianic-edge | raise error
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py", line 167, in peer_disconnected
ambianic-edge | await peer.reconnect()
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/peer.py", line 537, in reconnect
ambianic-edge | raise RuntimeError("This peer cannot reconnect to the server. "
ambianic-edge | RuntimeError: This peer cannot reconnect to the server. It has already been destroyed.
ambianic-edge | 2020-11-26 03:39:58 INFO /opt/ambianic-edge/src/ambianic/pipeline/ai/image_detection.py._log_stats(160): Inference time 329.37 ms, 2.09 fps in pipeline area_watch
ambianic-edge | 2020-11-26 03:39:59 INFO /opt/ambianic-edge/src/ambianic/pipeline/ai/image_detection.py._log_stats(160): Inference time 328.46 ms, 2.10 fps in pipeline area_watch
ambianic-edge | 2020-11-26 03:39:59 INFO /opt/ambianic-edge/src/ambianic/pipeline/ai/image_detection.py._log_stats(160): Inference time 328.69 ms, 2.09 fps in pipeline area_watch
ambianic-edge | 2020-11-26 03:40:00 INFO /opt/ambianic-edge/src/ambianic/pipeline/ai/image_detection.py._log_stats(160): Inference time 328.25 ms, 2.14 fps in pipeline area_watch
ambianic-edge | 2020-11-26 03:40:00 INFO /opt/ambianic-edge/src/ambianic/pipeline/ai/image_detection.py._log_stats(160): Inference time 328.72 ms, 2.07 fps in pipeline area_watch
ambianic-edge | 2020-11-26 03:40:00 INFO /opt/ambianic-edge/src/ambianic/pipeline/ai/image_detection.py._log_stats(160): Inference time 328.87 ms, 2.12 fps in pipeline area_watch
ambianic-edge | 2020-11-26 03:40:01 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.make_discoverable(328): Peer destroyed. Will create a new peer.
ambianic-edge | 2020-11-26 03:40:01 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pnp_service_connect(287): creating peer
ambianic-edge | 2020-11-26 03:40:01 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pnp_service_connect(293): last saved savedPeerId 7248f792-abec-4f72-a581-8dacd0308b72
ambianic-edge | 2020-11-26 03:40:01 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pnp_service_connect(295): Peer session token 9e6a734a-9bdd-495d-b920-1647f034773c
ambianic-edge | 2020-11-26 03:40:01 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pnp_service_connect(310): pnpService: peer created with id 7248f792-abec-4f72-a581-8dacd0308b72 , options: PeerOptions(host='ambianic-pnp.herokuapp.com', port=443, path='/', key='peerjs', token='9e6a734a-9bdd-495d-b920-1647f034773c', config=RTCConfiguration(iceServers=[RTCIceServer(urls=['stun:stun.l.google.com:19302'], username=None, credential=None, credentialType='password')]), secure=True, pingInterval=5)
ambianic-edge | 2020-11-26 03:40:01 INFO /usr/local/lib/python3.7/dist-packages/peerjs/peer.py.start(118): Peer started with UUID: 7248f792-abec-4f72-a581-8dacd0308b72
ambianic-edge | 2020-11-26 03:40:01 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.pnp_service_connect(312): peer activated
ambianic-edge | 2020-11-26 03:40:01 ERROR /usr/local/lib/python3.7/dist-packages/peerjs/peer.py._abort(454): Aborting!
ambianic-edge | PeerErrorType: PeerErrorType.UnavailableID
ambianic-edge | Error message: ID "$7248f792-abec-4f72-a581-8dacd0308b72" is taken
ambianic-edge | File "/usr/lib/python3.7/runpy.py", line 193, in _run_module_as_main
ambianic-edge | "__main__", mod_spec)
ambianic-edge | File "/usr/lib/python3.7/runpy.py", line 85, in _run_code
ambianic-edge | exec(code, run_globals)
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py", line 423, in <module>
ambianic-edge | loop.run_until_complete(_start())
ambianic-edge | File "/usr/lib/python3.7/asyncio/base_events.py", line 571, in run_until_complete
ambianic-edge | self.run_forever()
ambianic-edge | File "/usr/lib/python3.7/asyncio/base_events.py", line 539, in run_forever
ambianic-edge | self._run_once()
ambianic-edge | File "/usr/lib/python3.7/asyncio/base_events.py", line 1775, in _run_once
ambianic-edge | handle._run()
ambianic-edge | File "/usr/lib/python3.7/asyncio/events.py", line 88, in _run
ambianic-edge | self._context.run(self._callback, *self._args)
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/peer.py", line 231, in _on_server_idtaken
ambianic-edge | f'ID "${self.id}" is taken')
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/peer.py", line 455, in _abort
ambianic-edge | traceback.print_stack()
ambianic-edge | 2020-11-26 03:40:01 WARNING /usr/local/lib/python3.7/dist-packages/peerjs/peer.py.emitError(464): Connection error: ID "$7248f792-abec-4f72-a581-8dacd0308b72" is taken
ambianic-edge | 2020-11-26 03:40:01 WARNING /usr/local/lib/python3.7/dist-packages/peerjs/peer.py.emitError(469): Connection error:
ambianic-edge | ID "$7248f792-abec-4f72-a581-8dacd0308b72" is taken
ambianic-edge | 2020-11-26 03:40:01 ERROR /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.peer_error(176): Peer error ID "$7248f792-abec-4f72-a581-8dacd0308b72" is taken
ambianic-edge | NoneType: None
ambianic-edge | 2020-11-26 03:40:01 WARNING /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.peer_error(177): peerConnectionStatus None
ambianic-edge | 2020-11-26 03:40:01 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.peer_disconnected(158): Peer 7248f792-abec-4f72-a581-8dacd0308b72 disconnected from server.
ambianic-edge | 2020-11-26 03:40:01 INFO /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.peer_close(172): Peer connection closed
ambianic-edge | 2020-11-26 03:40:01 INFO /opt/ambianic-edge/src/ambianic/pipeline/ai/image_detection.py._log_stats(160): Inference time 330.29 ms, 2.05 fps in pipeline area_watch
ambianic-edge | 2020-11-26 03:40:01 INFO /usr/local/lib/python3.7/dist-packages/peerjs/peer.py.start(118): Peer started with UUID: 7248f792-abec-4f72-a581-8dacd0308b72
ambianic-edge | 2020-11-26 03:40:01 ERROR /usr/local/lib/python3.7/dist-packages/peerjs/peer.py._abort(454): Aborting!
ambianic-edge | PeerErrorType: PeerErrorType.UnavailableID
ambianic-edge | Error message: ID "$7248f792-abec-4f72-a581-8dacd0308b72" is taken
ambianic-edge | File "/usr/lib/python3.7/runpy.py", line 193, in _run_module_as_main
ambianic-edge | "__main__", mod_spec)
ambianic-edge | File "/usr/lib/python3.7/runpy.py", line 85, in _run_code
ambianic-edge | exec(code, run_globals)
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py", line 423, in <module>
ambianic-edge | loop.run_until_complete(_start())
ambianic-edge | File "/usr/lib/python3.7/asyncio/base_events.py", line 571, in run_until_complete
ambianic-edge | self.run_forever()
ambianic-edge | File "/usr/lib/python3.7/asyncio/base_events.py", line 539, in run_forever
ambianic-edge | self._run_once()
ambianic-edge | File "/usr/lib/python3.7/asyncio/base_events.py", line 1775, in _run_once
ambianic-edge | handle._run()
ambianic-edge | File "/usr/lib/python3.7/asyncio/events.py", line 88, in _run
ambianic-edge | self._context.run(self._callback, *self._args)
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/peer.py", line 231, in _on_server_idtaken
ambianic-edge | f'ID "${self.id}" is taken')
ambianic-edge | File "/usr/local/lib/python3.7/dist-packages/peerjs/peer.py", line 455, in _abort
ambianic-edge | traceback.print_stack()
ambianic-edge | 2020-11-26 03:40:01 WARNING /usr/local/lib/python3.7/dist-packages/peerjs/peer.py.emitError(464): Connection error: ID "$7248f792-abec-4f72-a581-8dacd0308b72" is taken
ambianic-edge | 2020-11-26 03:40:01 WARNING /usr/local/lib/python3.7/dist-packages/peerjs/peer.py.emitError(469): Connection error:
ambianic-edge | ID "$7248f792-abec-4f72-a581-8dacd0308b72" is taken
ambianic-edge | 2020-11-26 03:40:01 ERROR /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.peer_error(176): Peer error ID "$7248f792-abec-4f72-a581-8dacd0308b72" is taken
ambianic-edge | NoneType: None
ambianic-edge | 2020-11-26 03:40:01 WARNING /usr/local/lib/python3.7/dist-packages/peerjs/ext/http-proxy.py.peer_error(177): peerConnectionStatus None
``` | 2020-11-28T20:12:34 | 0.0 | [] | [] |
||
openwisp/openwisp-radius | openwisp__openwisp-radius-444 | a133977ccba7e9d4ae67997cc5760e239669734d | diff --git a/openwisp_radius/admin.py b/openwisp_radius/admin.py
index bc225c9f..6cfebdc6 100644
--- a/openwisp_radius/admin.py
+++ b/openwisp_radius/admin.py
@@ -7,6 +7,7 @@
from django.contrib.admin.utils import model_ngettext
from django.contrib.auth import get_user_model
from django.core.exceptions import PermissionDenied
+from django.templatetags.static import static
from django.urls import reverse
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
@@ -527,8 +528,8 @@ def get_is_verified(self, obj):
value = 'yes' if obj.registered_user.is_verified else 'no'
except Exception:
value = 'unknown'
-
- return mark_safe(f'<img src="/static/admin/img/icon-{value}.svg" alt="{value}">')
+ icon_url = static(f'admin/img/icon-{value}.svg')
+ return mark_safe(f'<img src="{icon_url}" alt="{value}">')
UserAdmin.get_is_verified = get_is_verified
| [bug] Load image using static()
Same as https://github.com/openwisp/openwisp-monitoring/commit/cf67548393162a00e3773ecca8e99605cba185fd here:
https://github.com/openwisp/openwisp-radius/blob/61a01fd03f44f2519e2cbe5a1cc9fff3db00115b/openwisp_radius/admin.py#L532
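For context, a short hedged sketch of why `static()` is preferred over a hard-coded `/static/...` path (the `STATIC_URL` value below is an assumption for illustration): `static()` builds the URL through Django's staticfiles machinery, so the icon keeps resolving when assets are served from a sub-path or a CDN.

```python
# Hedged illustration only; the STATIC_URL value is assumed.
import django
from django.conf import settings

settings.configure(STATIC_URL="https://cdn.example.com/assets/")
django.setup()

from django.templatetags.static import static

print(static("admin/img/icon-yes.svg"))
# -> https://cdn.example.com/assets/admin/img/icon-yes.svg
# A hard-coded "/static/admin/img/icon-yes.svg" would ignore STATIC_URL entirely.
```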
| 2022-10-12T08:02:37 | 0.0 | [] | [] |
|||
chelnak/jenkins-tui | chelnak__jenkins-tui-38 | ce07926559cee829affe7ed9c638cac361d7f117 | diff --git a/poetry.lock b/poetry.lock
index 99a9068..a9df738 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,6 +1,6 @@
[[package]]
name = "anyio"
-version = "3.3.2"
+version = "3.3.3"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
category = "main"
optional = false
@@ -59,7 +59,7 @@ d = ["aiohttp (>=3.3.2)", "aiohttp-cors"]
[[package]]
name = "certifi"
-version = "2021.5.30"
+version = "2021.10.8"
description = "Python package for providing Mozilla's CA Bundle."
category = "main"
optional = false
@@ -75,7 +75,7 @@ python-versions = ">=3.6.1"
[[package]]
name = "charset-normalizer"
-version = "2.0.6"
+version = "2.0.7"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
category = "main"
optional = false
@@ -86,7 +86,7 @@ unicode_backport = ["unicodedata2"]
[[package]]
name = "click"
-version = "8.0.1"
+version = "8.0.3"
description = "Composable command line interface toolkit"
category = "dev"
optional = false
@@ -196,14 +196,6 @@ category = "main"
optional = false
python-versions = ">=3.5"
-[[package]]
-name = "multi-key-dict"
-version = "2.0.3"
-description = "Multi key dictionary implementation"
-category = "main"
-optional = false
-python-versions = "*"
-
[[package]]
name = "mypy"
version = "0.910"
@@ -245,14 +237,6 @@ category = "dev"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
-[[package]]
-name = "pbr"
-version = "5.6.0"
-description = "Python Build Reasonableness"
-category = "main"
-optional = false
-python-versions = ">=2.6"
-
[[package]]
name = "platformdirs"
version = "2.4.0"
@@ -289,20 +273,6 @@ category = "main"
optional = false
python-versions = ">=3.5"
-[[package]]
-name = "python-jenkins"
-version = "1.7.0"
-description = "Python bindings for the remote Jenkins API"
-category = "main"
-optional = false
-python-versions = "*"
-
-[package.dependencies]
-multi-key-dict = "*"
-pbr = ">=0.8.2"
-requests = "*"
-six = ">=1.3.0"
-
[[package]]
name = "pyyaml"
version = "5.4.1"
@@ -313,30 +283,12 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
[[package]]
name = "regex"
-version = "2021.9.30"
+version = "2021.10.8"
description = "Alternative regular expression module, to replace re."
category = "dev"
optional = false
python-versions = "*"
-[[package]]
-name = "requests"
-version = "2.26.0"
-description = "Python HTTP for Humans."
-category = "main"
-optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
-
-[package.dependencies]
-certifi = ">=2017.4.17"
-charset-normalizer = {version = ">=2.0.0,<2.1.0", markers = "python_version >= \"3\""}
-idna = {version = ">=2.5,<4", markers = "python_version >= \"3\""}
-urllib3 = ">=1.21.1,<1.27"
-
-[package.extras]
-socks = ["PySocks (>=1.5.6,!=1.5.7)", "win-inet-pton"]
-use_chardet_on_py3 = ["chardet (>=3.0.2,<5)"]
-
[[package]]
name = "rfc3986"
version = "1.5.0"
@@ -353,11 +305,11 @@ idna2008 = ["idna"]
[[package]]
name = "rich"
-version = "10.11.0"
+version = "10.12.0"
description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal"
category = "main"
optional = false
-python-versions = ">=3.6,<4.0"
+python-versions = ">=3.6.2,<4.0.0"
[package.dependencies]
colorama = ">=0.4.0,<0.5.0"
@@ -371,7 +323,7 @@ jupyter = ["ipywidgets (>=7.5.1,<8.0.0)"]
name = "six"
version = "1.16.0"
description = "Python 2 and 3 compatibility utilities"
-category = "main"
+category = "dev"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
@@ -412,17 +364,17 @@ python-versions = "*"
[[package]]
name = "types-requests"
-version = "2.25.9"
+version = "2.25.10"
description = "Typing stubs for requests"
-category = "main"
+category = "dev"
optional = false
python-versions = "*"
[[package]]
name = "types-toml"
-version = "0.10.0"
+version = "0.10.1"
description = "Typing stubs for toml"
-category = "main"
+category = "dev"
optional = false
python-versions = "*"
@@ -434,19 +386,6 @@ category = "dev"
optional = false
python-versions = "*"
-[[package]]
-name = "urllib3"
-version = "1.26.7"
-description = "HTTP library with thread-safe connection pooling, file post, and more."
-category = "main"
-optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4"
-
-[package.extras]
-brotli = ["brotlipy (>=0.6.0)"]
-secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "ipaddress"]
-socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
-
[[package]]
name = "virtualenv"
version = "20.8.1"
@@ -469,12 +408,12 @@ testing = ["coverage (>=4)", "coverage-enable-subprocess (>=1)", "flaky (>=3)",
[metadata]
lock-version = "1.1"
python-versions = "^3.9"
-content-hash = "90d6773f76e402745030636fe4746ea26c2547edbdd7557cc5db2fa536c8cf68"
+content-hash = "a1c9309912cef1a8b4b9530c856b1c7fce9b11ca4bb802f051851a5841431580"
[metadata.files]
anyio = [
- {file = "anyio-3.3.2-py3-none-any.whl", hash = "sha256:c32da314c510b34a862f5afeaf8a446ffed2c2fde21583e654bd71ecfb5b744b"},
- {file = "anyio-3.3.2.tar.gz", hash = "sha256:0b993a2ef6c1dc456815c2b5ca2819f382f20af98087cc2090a4afed3a501436"},
+ {file = "anyio-3.3.3-py3-none-any.whl", hash = "sha256:56ceaeed2877723578b1341f4f68c29081db189cfb40a97d1922b9513f6d7db6"},
+ {file = "anyio-3.3.3.tar.gz", hash = "sha256:8eccec339cb4a856c94a75d50fc1d451faf32a05ef406be462e2efc59c9838b0"},
]
appdirs = [
{file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"},
@@ -488,20 +427,20 @@ black = [
{file = "black-20.8b1.tar.gz", hash = "sha256:1c02557aa099101b9d21496f8a914e9ed2222ef70336404eeeac8edba836fbea"},
]
certifi = [
- {file = "certifi-2021.5.30-py2.py3-none-any.whl", hash = "sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8"},
- {file = "certifi-2021.5.30.tar.gz", hash = "sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee"},
+ {file = "certifi-2021.10.8-py2.py3-none-any.whl", hash = "sha256:d62a0163eb4c2344ac042ab2bdf75399a71a2d8c7d47eac2e2ee91b9d6339569"},
+ {file = "certifi-2021.10.8.tar.gz", hash = "sha256:78884e7c1d4b00ce3cea67b44566851c4343c120abd683433ce934a68ea58872"},
]
cfgv = [
{file = "cfgv-3.3.1-py2.py3-none-any.whl", hash = "sha256:c6a0883f3917a037485059700b9e75da2464e6c27051014ad85ba6aaa5884426"},
{file = "cfgv-3.3.1.tar.gz", hash = "sha256:f5a830efb9ce7a445376bb66ec94c638a9787422f96264c98edc6bdeed8ab736"},
]
charset-normalizer = [
- {file = "charset-normalizer-2.0.6.tar.gz", hash = "sha256:5ec46d183433dcbd0ab716f2d7f29d8dee50505b3fdb40c6b985c7c4f5a3591f"},
- {file = "charset_normalizer-2.0.6-py3-none-any.whl", hash = "sha256:5d209c0a931f215cee683b6445e2d77677e7e75e159f78def0db09d68fafcaa6"},
+ {file = "charset-normalizer-2.0.7.tar.gz", hash = "sha256:e019de665e2bcf9c2b64e2e5aa025fa991da8720daa3c1138cadd2fd1856aed0"},
+ {file = "charset_normalizer-2.0.7-py3-none-any.whl", hash = "sha256:f7af805c321bfa1ce6714c51f254e0d5bb5e5834039bc17db7ebe3a4cec9492b"},
]
click = [
- {file = "click-8.0.1-py3-none-any.whl", hash = "sha256:fba402a4a47334742d782209a7c79bc448911afe1149d07bdabdf480b3e2f4b6"},
- {file = "click-8.0.1.tar.gz", hash = "sha256:8c04c11192119b1ef78ea049e0a6f0463e4c48ef00a30160c704337586f3ad7a"},
+ {file = "click-8.0.3-py3-none-any.whl", hash = "sha256:353f466495adaeb40b6b5f592f9f91cb22372351c84caeb068132442a4518ef3"},
+ {file = "click-8.0.3.tar.gz", hash = "sha256:410e932b050f5eed773c4cda94de75971c89cdb3155a72a0831139a79e5ecb5b"},
]
colorama = [
{file = "colorama-0.4.4-py2.py3-none-any.whl", hash = "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2"},
@@ -539,12 +478,6 @@ idna = [
{file = "idna-3.2-py3-none-any.whl", hash = "sha256:14475042e284991034cb48e06f6851428fb14c4dc953acd9be9a5e95c7b6dd7a"},
{file = "idna-3.2.tar.gz", hash = "sha256:467fbad99067910785144ce333826c71fb0e63a425657295239737f7ecd125f3"},
]
-multi-key-dict = [
- {file = "multi_key_dict-2.0.3.tar.gz", hash = "sha256:deebdec17aa30a1c432cb3f437e81f8621e1c0542a0c0617a74f71e232e9939e"},
- {file = "multi_key_dict-2.0.3.win-amd64.exe", hash = "sha256:fb67a532d7361a66820aa1e8fe6c0c939f4c34a3a09a3e8db199ce7b77c4fb78"},
- {file = "multi_key_dict-2.0.3.win32.exe", hash = "sha256:cb1e00aa9d8192496cc0cc040f6d9602f35e4cf099e866248be06b04fd45b42b"},
- {file = "multi_key_dict-2.0.3.zip", hash = "sha256:3a1e1fc705a30a7de1a153ec2992b3ca3655ccd9225d2e427fe6525c8f160d6d"},
-]
mypy = [
{file = "mypy-0.910-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:a155d80ea6cee511a3694b108c4494a39f42de11ee4e61e72bc424c490e46457"},
{file = "mypy-0.910-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:b94e4b785e304a04ea0828759172a15add27088520dc7e49ceade7834275bedb"},
@@ -582,10 +515,6 @@ pathspec = [
{file = "pathspec-0.9.0-py2.py3-none-any.whl", hash = "sha256:7d15c4ddb0b5c802d161efc417ec1a2558ea2653c2e8ad9c19098201dc1c993a"},
{file = "pathspec-0.9.0.tar.gz", hash = "sha256:e564499435a2673d586f6b2130bb5b95f04a3ba06f81b8f895b651a3c76aabb1"},
]
-pbr = [
- {file = "pbr-5.6.0-py2.py3-none-any.whl", hash = "sha256:c68c661ac5cc81058ac94247278eeda6d2e6aecb3e227b0387c30d277e7ef8d4"},
- {file = "pbr-5.6.0.tar.gz", hash = "sha256:42df03e7797b796625b1029c0400279c7c34fd7df24a7d7818a1abb5b38710dd"},
-]
platformdirs = [
{file = "platformdirs-2.4.0-py3-none-any.whl", hash = "sha256:8868bbe3c3c80d42f20156f22e7131d2fb321f5bc86a2a345375c6481a67021d"},
{file = "platformdirs-2.4.0.tar.gz", hash = "sha256:367a5e80b3d04d2428ffa76d33f124cf11e8fff2acdaa9b43d545f5c7d661ef2"},
@@ -598,10 +527,6 @@ pygments = [
{file = "Pygments-2.10.0-py3-none-any.whl", hash = "sha256:b8e67fe6af78f492b3c4b3e2970c0624cbf08beb1e493b2c99b9fa1b67a20380"},
{file = "Pygments-2.10.0.tar.gz", hash = "sha256:f398865f7eb6874156579fdf36bc840a03cab64d1cde9e93d68f46a425ec52c6"},
]
-python-jenkins = [
- {file = "python-jenkins-1.7.0.tar.gz", hash = "sha256:deed8fa79d32769a615984a5dde5e01eda04914d3f4091bd9a23d30474695106"},
- {file = "python_jenkins-1.7.0-py3-none-any.whl", hash = "sha256:c49c6e8770966906c0be1fe21d5e2ba08e08c93f315632929b20b3c2f2c3004c"},
-]
pyyaml = [
{file = "PyYAML-5.4.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:3b2b1824fe7112845700f815ff6a489360226a5609b96ec2190a45e62a9fc922"},
{file = "PyYAML-5.4.1-cp27-cp27m-win32.whl", hash = "sha256:129def1b7c1bf22faffd67b8f3724645203b79d8f4cc81f674654d9902cb4393"},
@@ -634,59 +559,55 @@ pyyaml = [
{file = "PyYAML-5.4.1.tar.gz", hash = "sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e"},
]
regex = [
- {file = "regex-2021.9.30-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:66696c8336a1b5d1182464f3af3427cc760118f26d0b09a2ddc16a976a4d2637"},
- {file = "regex-2021.9.30-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d87459ad3ab40cd8493774f8a454b2e490d8e729e7e402a0625867a983e4e02"},
- {file = "regex-2021.9.30-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78cf6a1e023caf5e9a982f5377414e1aeac55198831b852835732cfd0a0ca5ff"},
- {file = "regex-2021.9.30-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:255791523f80ea8e48e79af7120b4697ef3b74f6886995dcdb08c41f8e516be0"},
- {file = "regex-2021.9.30-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e502f8d4e5ef714bcc2c94d499684890c94239526d61fdf1096547db91ca6aa6"},
- {file = "regex-2021.9.30-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4907fb0f9b9309a5bded72343e675a252c2589a41871874feace9a05a540241e"},
- {file = "regex-2021.9.30-cp310-cp310-win32.whl", hash = "sha256:3be40f720af170a6b20ddd2ad7904c58b13d2b56f6734ee5d09bbdeed2fa4816"},
- {file = "regex-2021.9.30-cp310-cp310-win_amd64.whl", hash = "sha256:c2b180ed30856dfa70cfe927b0fd38e6b68198a03039abdbeb1f2029758d87e7"},
- {file = "regex-2021.9.30-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e6f2d2f93001801296fe3ca86515eb04915472b5380d4d8752f09f25f0b9b0ed"},
- {file = "regex-2021.9.30-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4fa7ba9ab2eba7284e0d7d94f61df7af86015b0398e123331362270d71fab0b9"},
- {file = "regex-2021.9.30-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28040e89a04b60d579c69095c509a4f6a1a5379cd865258e3a186b7105de72c6"},
- {file = "regex-2021.9.30-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f588209d3e4797882cd238195c175290dbc501973b10a581086b5c6bcd095ffb"},
- {file = "regex-2021.9.30-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:42952d325439ef223e4e9db7ee6d9087b5c68c5c15b1f9de68e990837682fc7b"},
- {file = "regex-2021.9.30-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:cae4099031d80703954c39680323dabd87a69b21262303160776aa0e55970ca0"},
- {file = "regex-2021.9.30-cp36-cp36m-win32.whl", hash = "sha256:0de8ad66b08c3e673b61981b9e3626f8784d5564f8c3928e2ad408c0eb5ac38c"},
- {file = "regex-2021.9.30-cp36-cp36m-win_amd64.whl", hash = "sha256:b345ecde37c86dd7084c62954468a4a655fd2d24fd9b237949dd07a4d0dd6f4c"},
- {file = "regex-2021.9.30-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a6f08187136f11e430638c2c66e1db091105d7c2e9902489f0dbc69b44c222b4"},
- {file = "regex-2021.9.30-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b55442650f541d195a535ccec33078c78a9521973fb960923da7515e9ed78fa6"},
- {file = "regex-2021.9.30-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87e9c489aa98f50f367fb26cc9c8908d668e9228d327644d7aa568d47e456f47"},
- {file = "regex-2021.9.30-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:e2cb7d4909ed16ed35729d38af585673f1f0833e73dfdf0c18e5be0061107b99"},
- {file = "regex-2021.9.30-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d0861e7f6325e821d5c40514c551fd538b292f8cc3960086e73491b9c5d8291d"},
- {file = "regex-2021.9.30-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:81fdc90f999b2147fc62e303440c424c47e5573a9b615ed5d43a5b832efcca9e"},
- {file = "regex-2021.9.30-cp37-cp37m-win32.whl", hash = "sha256:8c1ad61fa024195136a6b7b89538030bd00df15f90ac177ca278df9b2386c96f"},
- {file = "regex-2021.9.30-cp37-cp37m-win_amd64.whl", hash = "sha256:e3770781353a4886b68ef10cec31c1f61e8e3a0be5f213c2bb15a86efd999bc4"},
- {file = "regex-2021.9.30-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9c065d95a514a06b92a5026766d72ac91bfabf581adb5b29bc5c91d4b3ee9b83"},
- {file = "regex-2021.9.30-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9925985be05d54b3d25fd6c1ea8e50ff1f7c2744c75bdc4d3b45c790afa2bcb3"},
- {file = "regex-2021.9.30-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:470f2c882f2672d8eeda8ab27992aec277c067d280b52541357e1acd7e606dae"},
- {file = "regex-2021.9.30-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:ad0517df22a97f1da20d8f1c8cb71a5d1997fa383326b81f9cf22c9dadfbdf34"},
- {file = "regex-2021.9.30-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9e30838df7bfd20db6466fd309d9b580d32855f8e2c2e6d74cf9da27dcd9b63"},
- {file = "regex-2021.9.30-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5b34d2335d6aedec7dcadd3f8283b9682fadad8b9b008da8788d2fce76125ebe"},
- {file = "regex-2021.9.30-cp38-cp38-win32.whl", hash = "sha256:e07049cece3462c626d650e8bf42ddbca3abf4aa08155002c28cb6d9a5a281e2"},
- {file = "regex-2021.9.30-cp38-cp38-win_amd64.whl", hash = "sha256:37868075eda024470bd0feab872c692ac4ee29db1e14baec103257bf6cc64346"},
- {file = "regex-2021.9.30-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d331f238a7accfbbe1c4cd1ba610d4c087b206353539331e32a8f05345c74aec"},
- {file = "regex-2021.9.30-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6348a7ab2a502cbdd0b7fd0496d614007489adb7361956b38044d1d588e66e04"},
- {file = "regex-2021.9.30-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce7b1cca6c23f19bee8dc40228d9c314d86d1e51996b86f924aca302fc8f8bf9"},
- {file = "regex-2021.9.30-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:1f1125bc5172ab3a049bc6f4b9c0aae95a2a2001a77e6d6e4239fa3653e202b5"},
- {file = "regex-2021.9.30-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:638e98d069b14113e8afba6a54d1ca123f712c0d105e67c1f9211b2a825ef926"},
- {file = "regex-2021.9.30-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9a0b0db6b49da7fa37ca8eddf9f40a8dbc599bad43e64f452284f37b6c34d91c"},
- {file = "regex-2021.9.30-cp39-cp39-win32.whl", hash = "sha256:9910869c472e5a6728680ca357b5846546cbbd2ab3ad5bef986ef0bc438d0aa6"},
- {file = "regex-2021.9.30-cp39-cp39-win_amd64.whl", hash = "sha256:3b71213ec3bad9a5a02e049f2ec86b3d7c3e350129ae0f4e2f99c12b5da919ed"},
- {file = "regex-2021.9.30.tar.gz", hash = "sha256:81e125d9ba54c34579e4539a967e976a3c56150796674aec318b1b2f49251be7"},
-]
-requests = [
- {file = "requests-2.26.0-py2.py3-none-any.whl", hash = "sha256:6c1246513ecd5ecd4528a0906f910e8f0f9c6b8ec72030dc9fd154dc1a6efd24"},
- {file = "requests-2.26.0.tar.gz", hash = "sha256:b8aa58f8cf793ffd8782d3d8cb19e66ef36f7aba4353eec859e74678b01b07a7"},
+ {file = "regex-2021.10.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:981c786293a3115bc14c103086ae54e5ee50ca57f4c02ce7cf1b60318d1e8072"},
+ {file = "regex-2021.10.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51feefd58ac38eb91a21921b047da8644155e5678e9066af7bcb30ee0dca7361"},
+ {file = "regex-2021.10.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea8de658d7db5987b11097445f2b1f134400e2232cb40e614e5f7b6f5428710e"},
+ {file = "regex-2021.10.8-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:1ce02f420a7ec3b2480fe6746d756530f69769292eca363218c2291d0b116a01"},
+ {file = "regex-2021.10.8-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:39079ebf54156be6e6902f5c70c078f453350616cfe7bfd2dd15bdb3eac20ccc"},
+ {file = "regex-2021.10.8-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ff24897f6b2001c38a805d53b6ae72267025878d35ea225aa24675fbff2dba7f"},
+ {file = "regex-2021.10.8-cp310-cp310-win32.whl", hash = "sha256:c6569ba7b948c3d61d27f04e2b08ebee24fec9ff8e9ea154d8d1e975b175bfa7"},
+ {file = "regex-2021.10.8-cp310-cp310-win_amd64.whl", hash = "sha256:45cb0f7ff782ef51bc79e227a87e4e8f24bc68192f8de4f18aae60b1d60bc152"},
+ {file = "regex-2021.10.8-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:fab3ab8aedfb443abb36729410403f0fe7f60ad860c19a979d47fb3eb98ef820"},
+ {file = "regex-2021.10.8-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74e55f8d66f1b41d44bc44c891bcf2c7fad252f8f323ee86fba99d71fd1ad5e3"},
+ {file = "regex-2021.10.8-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d52c5e089edbdb6083391faffbe70329b804652a53c2fdca3533e99ab0580d9"},
+ {file = "regex-2021.10.8-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:1abbd95cbe9e2467cac65c77b6abd9223df717c7ae91a628502de67c73bf6838"},
+ {file = "regex-2021.10.8-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b9b5c215f3870aa9b011c00daeb7be7e1ae4ecd628e9beb6d7e6107e07d81287"},
+ {file = "regex-2021.10.8-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f540f153c4f5617bc4ba6433534f8916d96366a08797cbbe4132c37b70403e92"},
+ {file = "regex-2021.10.8-cp36-cp36m-win32.whl", hash = "sha256:1f51926db492440e66c89cd2be042f2396cf91e5b05383acd7372b8cb7da373f"},
+ {file = "regex-2021.10.8-cp36-cp36m-win_amd64.whl", hash = "sha256:5f55c4804797ef7381518e683249310f7f9646da271b71cb6b3552416c7894ee"},
+ {file = "regex-2021.10.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:fb2baff66b7d2267e07ef71e17d01283b55b3cc51a81b54cc385e721ae172ba4"},
+ {file = "regex-2021.10.8-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9e527ab1c4c7cf2643d93406c04e1d289a9d12966529381ce8163c4d2abe4faf"},
+ {file = "regex-2021.10.8-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36c98b013273e9da5790ff6002ab326e3f81072b4616fd95f06c8fa733d2745f"},
+ {file = "regex-2021.10.8-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:55ef044899706c10bc0aa052f2fc2e58551e2510694d6aae13f37c50f3f6ff61"},
+ {file = "regex-2021.10.8-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aa0ab3530a279a3b7f50f852f1bab41bc304f098350b03e30a3876b7dd89840e"},
+ {file = "regex-2021.10.8-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a37305eb3199d8f0d8125ec2fb143ba94ff6d6d92554c4b8d4a8435795a6eccd"},
+ {file = "regex-2021.10.8-cp37-cp37m-win32.whl", hash = "sha256:2efd47704bbb016136fe34dfb74c805b1ef5c7313aef3ce6dcb5ff844299f432"},
+ {file = "regex-2021.10.8-cp37-cp37m-win_amd64.whl", hash = "sha256:924079d5590979c0e961681507eb1773a142553564ccae18d36f1de7324e71ca"},
+ {file = "regex-2021.10.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b09d3904bf312d11308d9a2867427479d277365b1617e48ad09696fa7dfcdf59"},
+ {file = "regex-2021.10.8-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f125fce0a0ae4fd5c3388d369d7a7d78f185f904c90dd235f7ecf8fe13fa741"},
+ {file = "regex-2021.10.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f199419a81c1016e0560c39773c12f0bd924c37715bffc64b97140d2c314354"},
+ {file = "regex-2021.10.8-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:09e1031e2059abd91177c302da392a7b6859ceda038be9e015b522a182c89e4f"},
+ {file = "regex-2021.10.8-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9c070d5895ac6aeb665bd3cd79f673775caf8d33a0b569e98ac434617ecea57d"},
+ {file = "regex-2021.10.8-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:176796cb7f82a7098b0c436d6daac82f57b9101bb17b8e8119c36eecf06a60a3"},
+ {file = "regex-2021.10.8-cp38-cp38-win32.whl", hash = "sha256:5e5796d2f36d3c48875514c5cd9e4325a1ca172fc6c78b469faa8ddd3d770593"},
+ {file = "regex-2021.10.8-cp38-cp38-win_amd64.whl", hash = "sha256:e4204708fa116dd03436a337e8e84261bc8051d058221ec63535c9403a1582a1"},
+ {file = "regex-2021.10.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b8b6ee6555b6fbae578f1468b3f685cdfe7940a65675611365a7ea1f8d724991"},
+ {file = "regex-2021.10.8-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:973499dac63625a5ef9dfa4c791aa33a502ddb7615d992bdc89cf2cc2285daa3"},
+ {file = "regex-2021.10.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88dc3c1acd3f0ecfde5f95c32fcb9beda709dbdf5012acdcf66acbc4794468eb"},
+ {file = "regex-2021.10.8-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:4786dae85c1f0624ac77cb3813ed99267c9adb72e59fdc7297e1cf4d6036d493"},
+ {file = "regex-2021.10.8-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe6ce4f3d3c48f9f402da1ceb571548133d3322003ce01b20d960a82251695d2"},
+ {file = "regex-2021.10.8-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9e3e2cea8f1993f476a6833ef157f5d9e8c75a59a8d8b0395a9a6887a097243b"},
+ {file = "regex-2021.10.8-cp39-cp39-win32.whl", hash = "sha256:82cfb97a36b1a53de32b642482c6c46b6ce80803854445e19bc49993655ebf3b"},
+ {file = "regex-2021.10.8-cp39-cp39-win_amd64.whl", hash = "sha256:b04e512eb628ea82ed86eb31c0f7fc6842b46bf2601b66b1356a7008327f7700"},
+ {file = "regex-2021.10.8.tar.gz", hash = "sha256:26895d7c9bbda5c52b3635ce5991caa90fbb1ddfac9c9ff1c7ce505e2282fb2a"},
]
rfc3986 = [
{file = "rfc3986-1.5.0-py2.py3-none-any.whl", hash = "sha256:a86d6e1f5b1dc238b218b012df0aa79409667bb209e58da56d0b94704e712a97"},
{file = "rfc3986-1.5.0.tar.gz", hash = "sha256:270aaf10d87d0d4e095063c65bf3ddbc6ee3d0b226328ce21e036f946e421835"},
]
rich = [
- {file = "rich-10.11.0-py3-none-any.whl", hash = "sha256:44bb3f9553d00b3c8938abf89828df870322b9ba43caf3b12bb7758debdc6dec"},
- {file = "rich-10.11.0.tar.gz", hash = "sha256:016fa105f34b69c434e7f908bb5bd7fefa9616efdb218a2917117683a6394ce5"},
+ {file = "rich-10.12.0-py3-none-any.whl", hash = "sha256:c30d6808d1cd3defd56a7bd2d587d13e53b5f55de6cf587f035bcbb56bc3f37b"},
+ {file = "rich-10.12.0.tar.gz", hash = "sha256:83fb3eff778beec3c55201455c17cccde1ccdf66d5b4dade8ef28f56b50c4bd4"},
]
six = [
{file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
@@ -737,22 +658,18 @@ typed-ast = [
{file = "typed_ast-1.4.3.tar.gz", hash = "sha256:fb1bbeac803adea29cedd70781399c99138358c26d05fcbd23c13016b7f5ec65"},
]
types-requests = [
- {file = "types-requests-2.25.9.tar.gz", hash = "sha256:4ec8b71da73e5344adb9bee725a74ec8598e7286f9bcb17500d627f259fe4fb9"},
- {file = "types_requests-2.25.9-py3-none-any.whl", hash = "sha256:543ba8b3b23e38ac028da1d163aecbbc27d3cc8f654ae64339da539a191a2b1c"},
+ {file = "types-requests-2.25.10.tar.gz", hash = "sha256:3e121988168cffcfa61effaf48f90ebc5ee023f6cc50d04c0144edd7a2265b65"},
+ {file = "types_requests-2.25.10-py3-none-any.whl", hash = "sha256:bf3681e9258df22b27b623167b132869a26f04d5ca570e6a81a932db2a19ab72"},
]
types-toml = [
- {file = "types-toml-0.10.0.tar.gz", hash = "sha256:64f88a257dd62465b01fcf0d1ed4ffcaf19e320ee3e731c26a2e9dcc5090fdbb"},
- {file = "types_toml-0.10.0-py3-none-any.whl", hash = "sha256:c1bbb97ebe59593378769447343f78187017458cce8c3f1e76c80b9a930eef01"},
+ {file = "types-toml-0.10.1.tar.gz", hash = "sha256:5c1f8f8d57692397c8f902bf6b4d913a0952235db7db17d2908cc110e70610cb"},
+ {file = "types_toml-0.10.1-py3-none-any.whl", hash = "sha256:8cdfd2b7c89bed703158b042dd5cf04255dae77096db66f4a12ca0a93ccb07a5"},
]
typing-extensions = [
{file = "typing_extensions-3.10.0.2-py2-none-any.whl", hash = "sha256:d8226d10bc02a29bcc81df19a26e56a9647f8b0a6d4a83924139f4a8b01f17b7"},
{file = "typing_extensions-3.10.0.2-py3-none-any.whl", hash = "sha256:f1d25edafde516b146ecd0613dabcc61409817af4766fbbcfb8d1ad4ec441a34"},
{file = "typing_extensions-3.10.0.2.tar.gz", hash = "sha256:49f75d16ff11f1cd258e1b988ccff82a3ca5570217d7ad8c5f48205dd99a677e"},
]
-urllib3 = [
- {file = "urllib3-1.26.7-py2.py3-none-any.whl", hash = "sha256:c4fdf4019605b6e5423637e01bc9fe4daef873709a7973e195ceba0a62bbc844"},
- {file = "urllib3-1.26.7.tar.gz", hash = "sha256:4987c65554f7a2dbf30c18fd48778ef124af6fab771a377103da0585e2336ece"},
-]
virtualenv = [
{file = "virtualenv-20.8.1-py2.py3-none-any.whl", hash = "sha256:10062e34c204b5e4ec5f62e6ef2473f8ba76513a9a617e873f1f8fb4a519d300"},
{file = "virtualenv-20.8.1.tar.gz", hash = "sha256:bcc17f0b3a29670dd777d6f0755a4c04f28815395bca279cdcb213b97199a6b8"},
diff --git a/src/jenkins_tui/app.py b/src/jenkins_tui/app.py
index 9001f8d..2ca5286 100644
--- a/src/jenkins_tui/app.py
+++ b/src/jenkins_tui/app.py
@@ -1,17 +1,18 @@
import os
import sys
+from typing import List
from urllib.parse import urlparse
+from textual.widget import Widget
import toml
-from typing import Dict, List, Union
-
from textual.app import App
+from textual.events import Mount
from textual.widgets import ScrollView
-from textual.reactive import Reactive
+
from . import config
from .client import Jenkins
-from .views import WindowView
+from .views import TestScrollView
from .widgets import (
Header,
Footer,
@@ -40,10 +41,6 @@ class ClientConfig:
class JenkinsTUI(App):
"""This is the base class for Jenkins TUI."""
- current_node: Reactive[str] = Reactive("root")
- chicken_mode_enabled: Reactive[bool] = False
- client: Jenkins
-
def __init__(
self, title: str, log: str = None, chicken_mode_enabled: bool = False, **kwargs
):
@@ -55,18 +52,12 @@ def __init__(
chicken_mode_enabled (bool, optional): Enable super special chicken mode. Defaults to False.
"""
- self.chicken_mode_enabled = chicken_mode_enabled
-
- self.posible_areas = {
- "info": "col,info",
- "builds": "col,builds",
- "executor": "col,executor",
- "queue": "col,queue",
- }
+ super().__init__(title=title, log=log, log_verbosity=1, **kwargs)
- super().__init__(title=title, log=log, log_verbosity=5)
+ self.chicken_mode_enabled = chicken_mode_enabled
+ self.current_node = "root"
- def __get_client(self) -> Jenkins:
+ def _get_client(self) -> Jenkins:
"""Gets an instance of jenkins.Jenkins. Arguments are read from config. If the config doesn't exist, the user is prompted with some questions.
Returns:
@@ -100,8 +91,6 @@ def __get_client(self) -> Jenkins:
password = client_config.password
_client = Jenkins(url=url, username=username, password=password)
-
- self.log("Validating client connection..")
return _client
except Exception as e:
self.console.print(
@@ -115,29 +104,28 @@ def __get_client(self) -> Jenkins:
async def on_load(self) -> None:
"""Overrides on_load from App()"""
- self.client = self.__get_client()
+ self.client = self._get_client()
await self.bind("b", "view.toggle('sidebar')", "Toggle sidebar")
await self.bind("r", "refresh_tree", "Refresh")
await self.bind("q", "quit", "Quit")
- async def on_mount(self) -> None:
+ async def on_mount(self, event: Mount) -> None:
"""Overrides on_mount from App()"""
# Dock header and footer
-
await self.view.dock(Header(), edge="top")
await self.view.dock(Footer(), edge="bottom")
# Dock tree
directory = JenkinsTree(client=self.client, name="JenkinsTreeWidget")
- self.directory_scroll_view = ScrollView(
- contents=directory, name="DirectoryScrollView"
- )
- self.directory_scroll_view.vscroll = ScrollBar()
- await self.view.dock(
- self.directory_scroll_view, edge="left", size=40, name="sidebar"
+
+ self.tree_container = ScrollView(
+ contents=directory,
+ name="DirectoryScrollView",
)
+ self.tree_container.vscroll = ScrollBar()
+ await self.view.dock(self.tree_container, edge="left", size=40, name="sidebar")
# Dock container
# This is the main container that holds our info widget and the body
@@ -145,8 +133,9 @@ async def on_mount(self) -> None:
self.build_queue = BuildQueue(client=self.client)
self.executor_status = ExecutorStatus(client=self.client)
- self.container = WindowView()
- await self.container.dock(*[self.info, self.build_queue, self.executor_status])
+ widgets = [self.info, self.build_queue, self.executor_status]
+ self.container = TestScrollView(contents=widgets)
+ self.container.vscroll = ScrollBar()
await self.view.dock(self.container)
@@ -162,9 +151,8 @@ async def handle_root_click(self, message: RootClick) -> None:
async def set_home() -> None:
"""Used to set the content of the homescren"""
- await self.container.update(
- self.info, self.build_queue, self.executor_status
- )
+ widgets = [self.info, self.build_queue, self.executor_status]
+ await self.container.window.update(widgets=widgets)
if self.current_node != "root":
self.current_node = "root"
@@ -200,7 +188,9 @@ async def set_job() -> None:
info = JobInfo(title=name, text=info_text)
builds = BuildTable(client=self.client, url=message.url)
- await self.container.update(info, builds)
+ widgets: List[Widget] = [info, builds]
+ await self.container.update(widgets=widgets)
+ await self.container.refresh_layout()
if message.node_name != self.current_node:
self.current_node = message.node_name
@@ -212,8 +202,8 @@ async def action_refresh_tree(self) -> None:
self.log("Handling action refresh_tree")
directory = JenkinsTree(client=self.client, name="JenkinsTreeWidget")
- await self.directory_scroll_view.update(directory)
- self.directory_scroll_view.refresh(layout=True)
+ await self.tree_container.update(directory)
+ self.tree_container.refresh(layout=True)
def run():
diff --git a/src/jenkins_tui/views/__init__.py b/src/jenkins_tui/views/__init__.py
index 7c71b4a..217891c 100644
--- a/src/jenkins_tui/views/__init__.py
+++ b/src/jenkins_tui/views/__init__.py
@@ -1,3 +1,4 @@
from .window_view import WindowView
+from .scroll_view import TestScrollView
-__all__ = "WindowView"
+__all__ = ("WindowView", "TestScrollView")
diff --git a/src/jenkins_tui/views/scroll_view.py b/src/jenkins_tui/views/scroll_view.py
new file mode 100644
index 0000000..b01407a
--- /dev/null
+++ b/src/jenkins_tui/views/scroll_view.py
@@ -0,0 +1,23 @@
+from __future__ import annotations
+from typing import List
+
+from textual.widget import Widget
+
+
+from rich.console import RenderableType
+
+from textual.widgets import ScrollView
+
+
+class TestScrollView(ScrollView):
+ def __init__(self, contents: List[RenderableType | Widget]) -> None:
+ super().__init__()
+
+ from .window_view import WindowView
+
+ self.window = WindowView(contents)
+
+ async def update(self, widgets: List[RenderableType], home: bool = True) -> None:
+ if home:
+ self.home()
+ await self.window.update(widgets=widgets)
diff --git a/src/jenkins_tui/views/window_view.py b/src/jenkins_tui/views/window_view.py
index db96e17..d52ec05 100644
--- a/src/jenkins_tui/views/window_view.py
+++ b/src/jenkins_tui/views/window_view.py
@@ -1,15 +1,17 @@
from __future__ import annotations
-from typing import Optional, cast
+from typing import List
from textual import events
-from textual.geometry import Size
-from textual.layouts.dock import Dock, DockEdge, DockLayout
-from textual.view import View
from textual import messages
+from textual.geometry import Size, SpacingDimensions
from textual.widget import Widget
+from textual.view import View
+from textual.layouts.vertical import VerticalLayout
from textual.views._window_view import WindowChange
+from rich.console import RenderableType
+
class DoNotSet:
pass
@@ -19,58 +21,27 @@ class DoNotSet:
class WindowView(View):
- """A copy of textual.views.WindowView that implements docking. This will be refactored in the future."""
-
- def __init__(self) -> None:
- name = self.__class__.__name__
- super().__init__(layout=DockLayout(), name=name)
-
- async def dock(
+ def __init__(
self,
- *widgets: Widget,
- edge: DockEdge = "top",
- z: int = 0,
- size: int | None | DoNotSet = do_not_set,
+ widgets: List[RenderableType | Widget],
+ *,
+ auto_width: bool = False,
+ gutter: SpacingDimensions = (0, 0),
name: str | None = None
) -> None:
-
- dock = Dock(edge, widgets, z)
- assert isinstance(self.layout, DockLayout)
- self.layout.docks.append(dock)
-
+ layout = VerticalLayout(gutter=gutter, auto_width=auto_width)
for widget in widgets:
- if size is not do_not_set:
- widget.layout_size = cast(Optional[int], size)
- if name is None:
- await self.mount(widget)
- else:
- await self.mount(**{name: widget})
- await self.refresh_layout()
+ layout.add(widget)
+ super().__init__(name=name, layout=layout)
- async def update(
- self,
- *widgets: Widget,
- edge: DockEdge = "top",
- z: int = 0,
- size: int | None | DoNotSet = do_not_set,
- name: str | None = None
- ) -> None:
-
- assert isinstance(self.layout, DockLayout)
- self.layout.docks.clear()
-
- dock = Dock(edge, widgets, z)
- self.layout.docks.append(dock)
+ async def update(self, widgets: List[RenderableType | Widget]) -> None:
+ layout = self.layout
+ assert isinstance(layout, VerticalLayout)
+ layout.clear()
for widget in widgets:
- if size is not do_not_set:
- widget.layout_size = cast(Optional[int], size)
- if name is None:
- await self.mount(widget)
- else:
- await self.mount(**{name: widget})
+ layout.add(widget)
- self.layout.require_update()
await self.refresh_layout()
await self.emit(WindowChange(self))
diff --git a/src/jenkins_tui/widgets/build_queue.py b/src/jenkins_tui/widgets/build_queue.py
index ea90ec9..ee75ab1 100644
--- a/src/jenkins_tui/widgets/build_queue.py
+++ b/src/jenkins_tui/widgets/build_queue.py
@@ -60,7 +60,7 @@ async def _get_renderable(self):
async def _update(self):
"""Update the current renderable object."""
await self._get_renderable()
- self.refresh()
+ self.refresh(layout=True)
async def on_mount(self, event: Mount):
"""Actions that are executed when the widget is mounted.
@@ -68,7 +68,7 @@ async def on_mount(self, event: Mount):
Args:
event (events.Mount): A mount event.
"""
- await self._get_renderable()
+ await self._update()
self.set_interval(10, self._update)
def render(self) -> RenderableType:
diff --git a/src/jenkins_tui/widgets/build_table.py b/src/jenkins_tui/widgets/build_table.py
index e3bdbe1..4b161de 100644
--- a/src/jenkins_tui/widgets/build_table.py
+++ b/src/jenkins_tui/widgets/build_table.py
@@ -1,7 +1,6 @@
-from typing import Dict, List, Union
+from typing import Dict, Union
from urllib.parse import urlparse
from rich.console import RenderableType
-from rich.panel import Panel
from rich.style import Style
from rich.table import Table
from rich.align import Align
@@ -30,7 +29,6 @@ def __init__(self, client: Jenkins, url: str) -> None:
self.current_job_url = url
name = self.__class__.__name__
super().__init__(name=name)
-
self.renderable: RenderableType = ""
def _get_style_from_result(self, result: str) -> Union[str, Style]:
@@ -98,18 +96,17 @@ async def _get_renderable(self):
async def _update(self):
"""Update the current renderable object."""
await self._get_renderable()
- self.refresh()
+ self.refresh(layout=True)
- async def on_mount(self, event: Mount):
+ async def on_mount(self, event: Mount) -> None:
"""Actions that are executed when the widget is mounted.
Args:
event (events.Mount): A mount event.
"""
- await self._get_renderable()
+ await self._update()
self.set_interval(10, self._update)
def render(self) -> RenderableType:
"""Overrides render from textual.widget.Widget"""
-
- return Panel(renderable=self.renderable, title="builds", expand=True)
+ return self.renderable
diff --git a/src/jenkins_tui/widgets/executor_status.py b/src/jenkins_tui/widgets/executor_status.py
index a21b87c..bd1a17b 100644
--- a/src/jenkins_tui/widgets/executor_status.py
+++ b/src/jenkins_tui/widgets/executor_status.py
@@ -70,7 +70,7 @@ async def _get_renderable(self):
async def _update(self):
"""Update the current renderable object."""
await self._get_renderable()
- self.refresh()
+ self.refresh(layout=True)
async def on_mount(self, event: Mount):
"""Actions that are executed when the widget is mounted.
@@ -78,7 +78,7 @@ async def on_mount(self, event: Mount):
Args:
event (events.Mount): A mount event.
"""
- await self._get_renderable()
+ await self._update()
self.set_interval(10, self._update)
def render(self) -> RenderableType:
| Re-implement scroll view on top of custom WindowView
I've used a custom implementation of textual.views.WindowView that supports docking. Ideally the content renderable (to the right) should be nested inside a ScrollView as it was originally.
| 2021-10-12T20:55:56 | 0.0 | [] | [] |
|||
keshavdv/victron-ble | keshavdv__victron-ble-8 | 208901665b026e19addb590a803841ceddce4764 | diff --git a/victron_ble/scanner.py b/victron_ble/scanner.py
index 361f438..d01310e 100644
--- a/victron_ble/scanner.py
+++ b/victron_ble/scanner.py
@@ -99,6 +99,8 @@ def callback(self, ble_device: BLEDevice, raw_data: bytes):
)
try:
device = self.get_device(ble_device, raw_data)
+ except AdvertisementKeyMissingError:
+ return
except UnknownDeviceError as e:
logger.error(e)
return
| Make Scanner easier to override
At the moment, if you want to do something with this as a library, you end up reimplementing a lot of the current `callback()` method. We can factor those functions out:
1. Move the device lookup from callback() into a helper function that subclasses can use.
2. Allow subclasses to customize advertisement key loading.
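As a rough illustration of the kind of consumer code this would enable — a hedged sketch, not code from this PR: `get_device()` is the helper visible in the diff above, while the `load_key()` override and the hard-coded key store are assumptions used only for the example.

```python
# Hedged sketch of a library consumer subclassing Scanner after the refactor.
from bleak.backends.device import BLEDevice
from victron_ble.scanner import Scanner

MY_KEYS = {"AA:BB:CC:DD:EE:FF": "0123456789abcdef0123456789abcdef"}  # assumed key store


class MyScanner(Scanner):
    def load_key(self, address: str) -> str:
        # point 2: customize advertisement-key loading, e.g. from a config store
        return MY_KEYS[address.upper()]

    def callback(self, ble_device: BLEDevice, raw_data: bytes) -> None:
        # point 1: reuse the factored-out device lookup instead of copying callback()
        device = self.get_device(ble_device, raw_data)
        print(ble_device.address, device.parse(raw_data))
```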
| 2022-12-29T15:18:36 | 0.0 | [] | [] |
|||
lithops-cloud/lithops | lithops-cloud__lithops-964 | 03374840c07a24f99fe2fb218da1f6f31ccd2abf | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8c72f8229..804d0febf 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,6 +10,7 @@
- [K8s] Append 'docker_server' as a prefix of the runtime
- [Code Engine] Append 'docker_server' as a prefix of the runtime
- [Knative] Append 'docker_server' as a prefix of the runtime
+- [Google Cloud] Allow to use GOOGLE_APPLICATION_CREDENTIALS for service_credentials
- [Google Cloud Run] Allow CPU values <1, 6 and 8
- [Alibaba Cloud Functions] Added Python 3.9 runtime compatibility
- [Alibaba Cloud Functions] Allow to build a runtime from a custom requirements.txt file
diff --git a/docs/source/compute_config/gcp_cloudrun.md b/docs/source/compute_config/gcp_cloudrun.md
index f0acd5c7d..b7ded2c2f 100644
--- a/docs/source/compute_config/gcp_cloudrun.md
+++ b/docs/source/compute_config/gcp_cloudrun.md
@@ -38,8 +38,8 @@ python3 -m install lithops[gcp]
gcp:
project_name : <PROJECT_ID>
service_account : <SERVICE_ACCOUNT_EMAIL>
- credentials_path : <FULL_PATH_TO_CREDENTIALS_JSON>
region : <REGION_NAME>
+ credentials_path : <FULL_PATH_TO_CREDENTIALS_JSON>
```
## Summary of configuration keys for Google Cloud
@@ -50,8 +50,8 @@ python3 -m install lithops[gcp]
|---|---|---|---|---|
|gcp | project_name | |yes | Project id given by Google Cloud Platform in step 3 (e.g. lithops-876385) |
|gcp | service_account | |yes | Service account email of the service account created on step 5 (e.g. `[email protected]`) |
-|gcp | credentials_path | |yes | **Absolute** path of your JSON key file downloaded in step 7 (e.g. `/home/myuser/lithops-invoker1234567890.json`) |
|gcp | region | |yes | Region of the bucket created at step 8. Functions and pub/sub queue will be created in the same region (e.g. `us-east1`) |
+|gcp | credentials_path | | no | **Absolute** path of your JSON key file downloaded in step 7 (e.g. `/home/myuser/lithops-invoker1234567890.json`). Alternatively you can set `GOOGLE_APPLICATION_CREDENTIALS` environment variable. If not provided it will try to load the default credentials from the environment|
### Google Cloud Run
|Group|Key|Default|Mandatory|Additional info|
diff --git a/docs/source/compute_config/gcp_functions.md b/docs/source/compute_config/gcp_functions.md
index 5a193400b..a367dfd07 100644
--- a/docs/source/compute_config/gcp_functions.md
+++ b/docs/source/compute_config/gcp_functions.md
@@ -39,8 +39,8 @@ python3 -m install lithops[gcp]
gcp:
project_name : <PROJECT_ID>
service_account : <SERVICE_ACCOUNT_EMAIL>
- credentials_path : <FULL_PATH_TO_CREDENTIALS_JSON>
region : <REGION_NAME>
+ credentials_path : <FULL_PATH_TO_CREDENTIALS_JSON>
```
## Summary of configuration keys for Google:
@@ -51,8 +51,8 @@ python3 -m install lithops[gcp]
|---|---|---|---|---|
|gcp | project_name | |yes | Project id given by Google Cloud Platform in step 3 (e.g. `lithops-876385`) |
|gcp | service_account | |yes | Service account email of the service account created on step 5 (e.g. `[email protected]`) |
-|gcp | credentials_path | |yes | **Absolute** path of your JSON key file downloaded in step 7 (e.g. `/home/myuser/lithops-invoker1234567890.json`) |
|gcp | region | |yes | Region of the bucket created at step 8. Functions and pub/sub queue will be created in the same region (e.g. `us-east1`) |
+|gcp | credentials_path | |yes | **Absolute** path of your JSON key file downloaded in step 7 (e.g. `/home/myuser/lithops-invoker1234567890.json`). Alternatively you can set `GOOGLE_APPLICATION_CREDENTIALS` environment variable. If not provided it will try to load the default credentials from the environment|
### Google Cloud Functions
|Group|Key|Default|Mandatory|Additional info|
diff --git a/docs/source/storage_config/gcp_storage.md b/docs/source/storage_config/gcp_storage.md
index edac1c0b3..cac319532 100644
--- a/docs/source/storage_config/gcp_storage.md
+++ b/docs/source/storage_config/gcp_storage.md
@@ -39,8 +39,8 @@ $ python3 -m pip install lithops[gcp]
gcp:
project_name : <<PROJECT_ID>>
service_account : <SERVICE_ACCOUNT_EMAIL>
- credentials_path : <FULL_PATH_TO_CREDENTIALS_JSON>
region : <REGION_NAME>
+ credentials_path : <FULL_PATH_TO_CREDENTIALS_JSON>
gcp_storage:
storage_bucket: <BUCKET_NAME>
@@ -54,8 +54,8 @@ $ python3 -m pip install lithops[gcp]
|---|---|---|---|---|
|gcp | project_name | |yes | Project id given by Google Cloud Platform in step 3 (e.g. lithops-876385) |
|gcp | service_account | |yes | Service account email of the service account created on step 5 (e.g. `[email protected]`) |
-|gcp | credentials_path | |yes | **Absolute** path of your JSON key file downloaded in step 7 (e.g. `/home/myuser/lithops-invoker1234567890.json`) |
|gcp | region | |yes | Region of the bucket created at step 8. Functions and pub/sub queue will be created in the same region (e.g. `us-east1`) |
+|gcp | credentials_path | |no | **Absolute** path of your JSON key file downloaded in step 7 (e.g. `/home/myuser/lithops-invoker1234567890.json`). Alternatively you can set `GOOGLE_APPLICATION_CREDENTIALS` environment variable. If not provided it will try to load the default credentials from the environment |
### Google Cloud Storage
|Group|Key|Default|Mandatory|Additional info|
diff --git a/lithops/serverless/backends/gcp_cloudrun/cloudrun.py b/lithops/serverless/backends/gcp_cloudrun/cloudrun.py
index d57fcf8f3..95ffd950c 100644
--- a/lithops/serverless/backends/gcp_cloudrun/cloudrun.py
+++ b/lithops/serverless/backends/gcp_cloudrun/cloudrun.py
@@ -19,17 +19,20 @@
import os
import time
import json
-
+import urllib
import yaml
+import google.auth
+import google.oauth2.id_token
+from threading import Lock
+from google.oauth2 import service_account
+from google_auth_httplib2 import AuthorizedHttp
+from googleapiclient.discovery import build
from lithops import utils
from lithops.constants import COMPUTE_CLI_MSG
from lithops.version import __version__
-from google.oauth2 import service_account
-from google_auth_httplib2 import AuthorizedHttp
-from google.auth.transport.requests import AuthorizedSession
-from googleapiclient.discovery import build
+invoke_mutex = Lock()
from . import config
@@ -45,15 +48,18 @@ def __init__(self, cloudrun_config, internal_storage):
self.name = 'gcp_cloudrun'
self.type = 'faas'
self.cr_config = cloudrun_config
- self.credentials_path = cloudrun_config['credentials_path']
+ self.credentials_path = cloudrun_config.get('credentials_path')
self.service_account = cloudrun_config['service_account']
self.project_name = cloudrun_config['project_name']
self.region = cloudrun_config['region']
- self._invoker_sess = None
- self._invoker_sess_route = '/'
+ self._api_resource = self._build_api_resource()
+
self._service_url = None
- self._api_resource = None
+ self._id_token = None
+
+ if self.credentials_path and os.path.isfile(self.credentials_path):
+ logger.debug(f'Getting GCP credentials from {self.credentials_path}')
msg = COMPUTE_CLI_MSG.format('Google Cloud Run')
logger.info(f"{msg} - Region: {self.region} - Project: {self.project_name}")
@@ -81,45 +87,46 @@ def _build_api_resource(self):
"""
Instantiate and authorize admin discovery API session
"""
- if self._api_resource is None:
- logger.debug('Building admin API session')
+ logger.debug('Building admin API session')
+ if os.path.isfile(self.credentials_path):
credentials = service_account.Credentials.from_service_account_file(self.credentials_path, scopes=SCOPES)
- http = AuthorizedHttp(credentials, http=httplib2.Http())
- self._api_resource = build(
- 'run', CLOUDRUN_API_VERSION,
- http=http, cache_discovery=False,
- client_options={
- 'api_endpoint': f'https://{self.region}-run.googleapis.com'
- }
- )
- return self._api_resource
-
- def _build_invoker_sess(self, runtime_name, memory, route):
- """
- Instantiate and authorize invoker session for a specific service and route
- """
- if self._invoker_sess is None or route != self._invoker_sess_route:
- logger.debug('Building invoker session')
- target = self._get_service_endpoint(runtime_name, memory) + route
- credentials = (service_account
- .IDTokenCredentials
- .from_service_account_file(self.credentials_path, target_audience=target))
- self._invoker_sess = AuthorizedSession(credentials)
- self._invoker_sess_route = route
- return self._invoker_sess
-
- def _get_service_endpoint(self, runtime_name, memory):
+ else:
+ credentials, _ = google.auth.default(scopes=SCOPES)
+ http = AuthorizedHttp(credentials, http=httplib2.Http())
+ api_resource = build(
+ 'run', CLOUDRUN_API_VERSION,
+ http=http, cache_discovery=False,
+ client_options={
+ 'api_endpoint': f'https://{self.region}-run.googleapis.com'
+ }
+ )
+
+ return api_resource
+
+ def _get_url_and_token(self, runtime_name, memory):
"""
- Gets service endpoint URL from runtime name and memory
+ Generates a connection token
"""
- if self._service_url is None:
+ invoke_mutex.acquire()
+ request_token = False
+ if not self._service_url or (self._service_url and str(memory) not in self._service_url):
logger.debug('Getting service endpoint')
svc_name = self._format_service_name(runtime_name, memory)
- res = self._build_api_resource().namespaces().services().get(
+ res = self._api_resource.namespaces().services().get(
name=f'namespaces/{self.project_name}/services/{svc_name}'
).execute()
self._service_url = res['status']['url']
- return self._service_url
+ request_token = True
+ logger.debug(f'Service endpoint url is {self._service_url}')
+
+ if not self._id_token or request_token:
+ if self.credentials_path and os.path.isfile(self.credentials_path):
+ os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = self.credentials_path
+ auth_req = google.auth.transport.requests.Request()
+ self._id_token = google.oauth2.id_token.fetch_id_token(auth_req, self._service_url)
+ invoke_mutex.release()
+
+ return self._service_url, self._id_token
def _format_image_name(self, runtime_name):
"""
@@ -181,7 +188,7 @@ def invoke(self, runtime_name, memory, payload, return_result=False):
job_id = payload.get('job_id')
route = payload.get("service_route", '/')
- sess = self._build_invoker_sess(runtime_name, memory, route)
+ service_url, id_token = self._get_url_and_token(runtime_name, memory)
if exec_id and job_id and call_id:
logger.debug(f'ExecutorID {exec_id} | JobID {job_id} - Invoking function call {call_id}')
@@ -190,11 +197,12 @@ def invoke(self, runtime_name, memory, payload, return_result=False):
else:
logger.debug('Invoking function')
- url = self._get_service_endpoint(runtime_name, memory) + route
- res = sess.post(url=url, data=json.dumps(payload, default=str))
+ req = urllib.request.Request(service_url+route, data=json.dumps(payload, default=str).encode('utf-8'))
+ req.add_header("Authorization", f"Bearer {id_token}")
+ res = urllib.request.urlopen(req)
- if res.status_code in (200, 202):
- data = res.json()
+ if res.getcode() in (200, 202):
+ data = json.loads(res.read())
if return_result:
return data
return data["activationId"]
@@ -272,7 +280,7 @@ def _create_service(self, runtime_name, runtime_memory, timeout):
container['resources']['requests']['cpu'] = str(self.cr_config['runtime_cpu'])
logger.info(f"Creating runtime: {runtime_name}")
- res = self._build_api_resource().namespaces().services().create(
+ res = self._api_resource.namespaces().services().create(
parent=f'namespaces/{self.project_name}', body=svc_res
).execute()
@@ -283,7 +291,7 @@ def _create_service(self, runtime_name, runtime_memory, timeout):
retry = 15
logger.debug(f'Waiting {service_name} service to become ready')
while not ready:
- res = self._build_api_resource().namespaces().services().get(
+ res = self._api_resource.namespaces().services().get(
name=f'namespaces/{self.project_name}/services/{service_name}'
).execute()
@@ -314,7 +322,7 @@ def _wait_service_deleted(self, service_name):
# Wait until the service is completely deleted
while True:
try:
- res = self._build_api_resource().namespaces().services().get(
+ res = self._api_resource.namespaces().services().get(
name=f'namespaces/{self.project_name}/services/{service_name}'
).execute()
time.sleep(1)
@@ -325,7 +333,7 @@ def delete_runtime(self, runtime_name, memory):
service_name = self._format_service_name(runtime_name, memory)
logger.info(f'Deleting runtime: {runtime_name} - {memory}MB')
try:
- self._build_api_resource().namespaces().services().delete(
+ self._api_resource.namespaces().services().delete(
name=f'namespaces/{self.project_name}/services/{service_name}'
).execute()
self._wait_service_deleted(service_name)
@@ -343,7 +351,7 @@ def clean(self):
def list_runtimes(self, runtime_name='all'):
logger.debug('Listing runtimes')
- res = self._build_api_resource().namespaces().services().list(
+ res = self._api_resource.namespaces().services().list(
parent=f'namespaces/{self.project_name}',
).execute()
diff --git a/lithops/serverless/backends/gcp_cloudrun/config.py b/lithops/serverless/backends/gcp_cloudrun/config.py
index d5a4d79e1..854f5a1a9 100644
--- a/lithops/serverless/backends/gcp_cloudrun/config.py
+++ b/lithops/serverless/backends/gcp_cloudrun/config.py
@@ -19,7 +19,7 @@
logger = logging.getLogger(__name__)
-REQ_PARAMS = ('project_name', 'service_account', 'credentials_path', 'region')
+REQ_PARAMS = ('project_name', 'service_account', 'region')
DEFAULT_CONFIG_KEYS = {
'runtime_timeout': 300, # Default: 600 seconds => 10 minutes
@@ -80,6 +80,7 @@
cryptography \
httplib2 \
google-cloud-storage \
+ google-cloud-pubsub \
google-api-python-client \
gcsfs \
google-auth
@@ -147,13 +148,15 @@ def load_config(config_data):
for param in REQ_PARAMS:
if param not in config_data['gcp']:
- msg = "{} is mandatory under 'gcp' section of the configuration".format(REQ_PARAMS)
+ msg = f"{param} is mandatory under 'gcp' section of the configuration"
raise Exception(msg)
- config_data['gcp']['credentials_path'] = os.path.expanduser(config_data['gcp']['credentials_path'])
+ if 'credentials_path' not in config_data['gcp']:
+ if 'GOOGLE_APPLICATION_CREDENTIALS' in os.environ:
+ config_data['gcp']['credentials_path'] = os.environ.get('GOOGLE_APPLICATION_CREDENTIALS')
- if not os.path.isfile(config_data['gcp']['credentials_path']):
- raise Exception(f"Credentials file {config_data['gcp']['credentials_path']} not found")
+ if 'credentials_path' in config_data['gcp']:
+ config_data['gcp']['credentials_path'] = os.path.expanduser(config_data['gcp']['credentials_path'])
for key in DEFAULT_CONFIG_KEYS:
if key not in config_data['gcp_cloudrun']:
diff --git a/lithops/serverless/backends/gcp_functions/config.py b/lithops/serverless/backends/gcp_functions/config.py
index abac86b75..f8eccc385 100644
--- a/lithops/serverless/backends/gcp_functions/config.py
+++ b/lithops/serverless/backends/gcp_functions/config.py
@@ -35,7 +35,7 @@
USER_RUNTIMES_PREFIX = 'lithops.user_runtimes'
-REQ_PARAMS = ('project_name', 'service_account', 'credentials_path', 'region')
+REQ_PARAMS = ('project_name', 'service_account', 'region')
DEFAULT_CONFIG_KEYS = {
'runtime_timeout': 300, # Default: 5 minutes
@@ -55,6 +55,7 @@
google-cloud-storage
google-cloud-pubsub
google-auth
+google-api-python-client
certifi
chardet
docutils
@@ -85,10 +86,12 @@ def load_config(config_data=None):
msg = f"{param} is mandatory under 'gcp' section of the configuration"
raise Exception(msg)
- config_data['gcp']['credentials_path'] = os.path.expanduser(config_data['gcp']['credentials_path'])
+ if 'credentials_path' not in config_data['gcp']:
+ if 'GOOGLE_APPLICATION_CREDENTIALS' in os.environ:
+ config_data['gcp']['credentials_path'] = os.environ.get('GOOGLE_APPLICATION_CREDENTIALS')
- if not os.path.isfile(config_data['gcp']['credentials_path']):
- raise Exception(f"Credentials file {config_data['gcp']['credentials_path']} not found")
+ if 'credentials_path' in config_data['gcp']:
+ config_data['gcp']['credentials_path'] = os.path.expanduser(config_data['gcp']['credentials_path'])
for key in DEFAULT_CONFIG_KEYS:
if key not in config_data['gcp_functions']:
diff --git a/lithops/serverless/backends/gcp_functions/gcp_functions.py b/lithops/serverless/backends/gcp_functions/gcp_functions.py
index e8f6f0956..f88a35b54 100644
--- a/lithops/serverless/backends/gcp_functions/gcp_functions.py
+++ b/lithops/serverless/backends/gcp_functions/gcp_functions.py
@@ -21,6 +21,7 @@
import httplib2
import zipfile
import time
+import google.auth
from google.cloud import pubsub_v1
from google.oauth2 import service_account
from google_auth_httplib2 import AuthorizedHttp
@@ -45,7 +46,7 @@ def __init__(self, gcf_config, internal_storage):
self.region = gcf_config['region']
self.service_account = gcf_config['service_account']
self.project_name = gcf_config['project_name']
- self.credentials_path = gcf_config['credentials_path']
+ self.credentials_path = gcf_config.get('credentials_path')
self.num_retries = gcf_config['retries']
self.retry_sleep = gcf_config['retry_sleep']
self.trigger = gcf_config['trigger']
@@ -59,7 +60,9 @@ def __init__(self, gcf_config, internal_storage):
service_account_info,
audience=config.AUDIENCE
)
+ logger.debug(f'Getting GCP credentials from {self.credentials_path}')
except Exception as e: # Get credentials from gcp function environment
+ logger.debug(f'Getting GCP credentials from the environment')
credentials = None
self.publisher_client = pubsub_v1.PublisherClient(credentials=credentials)
@@ -106,8 +109,10 @@ def _encode_payload(self, payload):
return base64.b64encode(bytes(json.dumps(payload), 'utf-8')).decode('utf-8')
def _get_auth_session(self):
- credentials = service_account.Credentials.from_service_account_file(self.credentials_path,
- scopes=config.SCOPES)
+ if os.path.isfile(self.credentials_path):
+ credentials = service_account.Credentials.from_service_account_file(self.credentials_path, scopes=config.SCOPES)
+ else:
+ credentials, _ = google.auth.default(scopes=config.SCOPES)
http = httplib2.Http()
return AuthorizedHttp(credentials, http=http)
diff --git a/lithops/storage/backends/gcp_storage/config.py b/lithops/storage/backends/gcp_storage/config.py
index 46152386b..263343e41 100644
--- a/lithops/storage/backends/gcp_storage/config.py
+++ b/lithops/storage/backends/gcp_storage/config.py
@@ -17,7 +17,7 @@
import os
-REQ_PARAMS = ('project_name', 'service_account', 'credentials_path', 'region')
+REQ_PARAMS = ('project_name', 'service_account', 'region')
def load_config(config_data=None):
@@ -29,10 +29,12 @@ def load_config(config_data=None):
msg = f"'{param}' is mandatory under 'gcp' section of the configuration"
raise Exception(msg)
- config_data['gcp']['credentials_path'] = os.path.expanduser(config_data['gcp']['credentials_path'])
+ if 'credentials_path' not in config_data['gcp']:
+ if 'GOOGLE_APPLICATION_CREDENTIALS' in os.environ:
+ config_data['gcp']['credentials_path'] = os.environ.get('GOOGLE_APPLICATION_CREDENTIALS')
- if not os.path.isfile(config_data['gcp']['credentials_path']):
- raise Exception(f"Credentials file {config_data['gcp']['credentials_path']} not found")
+ if 'credentials_path' in config_data['gcp']:
+ config_data['gcp']['credentials_path'] = os.path.expanduser(config_data['gcp']['credentials_path'])
config_data['gcp_storage'].update(config_data['gcp'])
diff --git a/lithops/storage/backends/gcp_storage/gcp_storage.py b/lithops/storage/backends/gcp_storage/gcp_storage.py
index daa726486..1945f9887 100644
--- a/lithops/storage/backends/gcp_storage/gcp_storage.py
+++ b/lithops/storage/backends/gcp_storage/gcp_storage.py
@@ -34,10 +34,12 @@
class GCPStorageBackend:
def __init__(self, gcp_storage_config):
logger.debug("Creating GCP Storage client")
- self.credentials_path = gcp_storage_config['credentials_path']
+ self.credentials_path = gcp_storage_config.get('credentials_path')
try: # Get credenitals from JSON file
self.client = storage.Client.from_service_account_json(self.credentials_path)
+ logger.debug(f'Getting GCP credentials from {self.credentials_path}')
except Exception: # Get credentials from gcp function environment
+ logger.debug(f'Getting GCP credentials from the environment')
self.client = storage.Client()
msg = STORAGE_CLI_MSG.format('Google Cloud Storage')
logger.info("{}".format(msg))
| Allow authenticating to GCP via service account provided by compute engine
Very cool project! Am excited to play with it :)
I'm trying to set this up with GCP, on a kubernetes cluster with [workload identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity) setup to provide GCP IAM permissions to workloads. All the google cloud sdk clients (including the python one used by this project) know how to use this to 'automatically' find credentials and other metadata (see https://cloud.google.com/docs/authentication/production#automatically). However, because in lithops we are explicitly looking for a service account JSON key file (https://github.com/lithops-cloud/lithops/blob/03374840c07a24f99fe2fb218da1f6f31ccd2abf/lithops/serverless/backends/gcp_cloudrun/cloudrun.py#L48), this automatic behavior of the underlying libraries is overriden, and I get the following error when trying to run the hello world:
```
File /srv/conda/envs/notebook/lib/python3.9/site-packages/lithops/serverless/backends/gcp_cloudrun/config.py:96, in load_config(config_data)
94 if param not in config_data['gcp']:
95 msg = "{} is mandatory under 'gcp' section of the configuration".format(REQ_PARAMS)
---> 96 raise Exception(msg)
98 if not exists(config_data['gcp']['credentials_path']) or not isfile(config_data['gcp']['credentials_path']):
99 raise Exception("Path {} must be service account "
100 "credential JSON file.".format(config_data['gcp']['credentials_path']))
Exception: ('project_name', 'service_account', 'credentials_path', 'region') is mandatory under 'gcp' section of the configuration
```
for the following code:
```python
config = {'lithops': {'backend': 'gcp_cloudrun', 'storage': 'gcp_storage'},
'gcp': {},
'gcp_storage': {'storage_bucket': 'leap-scratch',
}}
def hello_world(name):
return 'Hello {}!'.format(name)
if __name__ == '__main__':
fexec = lithops.FunctionExecutor(config=config)
fexec.call_async(hello_world, 'World')
print(fexec.get_result())
```
If the auth params (service account key, etc) are not explicitly set, lithops should try to have the gcp libraries auto discover them. This would work both with application default credentials as well as on compute engine / GKE, and would make running this far simpler in those cases.
Thank you for working on this!
| 2022-07-11T17:59:33 | 0.0 | [] | [] |
|||
pi-top/pi-top-Python-SDK | pi-top__pi-top-Python-SDK-287 | 17a91750b638f7f17bfc34310bdfaac04a823f73 | diff --git a/pitop/miniscreen/oled/oled.py b/pitop/miniscreen/oled/oled.py
index c61b255f4..c094f714f 100644
--- a/pitop/miniscreen/oled/oled.py
+++ b/pitop/miniscreen/oled/oled.py
@@ -399,13 +399,15 @@ def get_text_size(text):
self.display_image(image)
def __display(self, image_to_display, force=False, invert=False):
- self.__fps_regulator.stop_timer()
+ self.stop_animated_image()
if invert:
image_to_display = ImageOps.invert(
image_to_display.convert('L')
).convert('1')
+ self.__fps_regulator.stop_timer()
+
if force or self.should_redisplay(image_to_display):
self.device.display(image_to_display)
| OLED draw actions should override background thread
Currently, the OLED can play animated images in a background thread. While this is occurring, it is possible to call other functions in the main thread that draw to the display.
This situation should be handled in a more elegant way.
Possible solutions:
* all draw actions are ignored until the animation is explicitly stopped or finished
* draw actions block, background animation ends on next loop
* background action is immediately stopped in favour of next requested action
| @m-roberts I like the idea of blocking behaviour to wait until current animation loop finishes (option 2) - this is nice to have in simple user programs as you don't have to manage it yourself by waiting with a sleep
Could we add an optional parameter in for `force=True` so that it could force the background action to stop immediately?
The issues with blocking on current animation loop is that it could be a really long GIF, and you would get locked out. Trying to get any smarter than that requires handling timeouts, etc. which just gets messy fast.
I would say that if you provide a newer instruction to display on the OLED, it should either override anything in the background immediately, or fail until the user has explicitly taken action to end what they are doing in the background thread..
Jorge and I agree that newer requests should override | 2021-03-26T20:51:03 | 0.0 | [] | [] |
||
meta-llama/llama-recipes | meta-llama__llama-recipes-407 | 6a7478a6aaf198b2c8de6873a0b81e8463574308 | diff --git a/src/llama_recipes/finetuning.py b/src/llama_recipes/finetuning.py
index 6b5650b20..d27685777 100644
--- a/src/llama_recipes/finetuning.py
+++ b/src/llama_recipes/finetuning.py
@@ -9,7 +9,7 @@
import random
import torch
import torch.optim as optim
-from peft import get_peft_model, prepare_model_for_int8_training
+from peft import get_peft_model, prepare_model_for_kbit_training
from torch.distributed.fsdp import (
FullyShardedDataParallel as FSDP,
ShardingStrategy
@@ -144,7 +144,7 @@ def main(**kwargs):
# Prepare the model for int8 training if quantization is enabled
if train_config.quantization:
- model = prepare_model_for_int8_training(model)
+ model = prepare_model_for_kbit_training(model)
# Convert the model to bfloat16 if fsdp and pure_bf16 is enabled
if train_config.enable_fsdp and fsdp_config.pure_bf16:
| peft library cant find the package named prepare_model_for_int8_training

| 2024-03-21T15:36:15 | 0.0 | [] | [] |
|||
adafruit/Adafruit_CircuitPython_RFM9x | adafruit__Adafruit_CircuitPython_RFM9x-85 | 04eba3d910ddcb0c8faa4c49c6089289aa779a7e | diff --git a/adafruit_rfm9x.py b/adafruit_rfm9x.py
index fa0dd77..17d5b02 100644
--- a/adafruit_rfm9x.py
+++ b/adafruit_rfm9x.py
@@ -259,7 +259,7 @@ def __set__(self, obj: "RFM9x", val: int) -> None:
def __init__(
self,
spi: SPI,
- cs: DigitalInOut,
+ cs: DigitalInOut, # pylint: disable=invalid-name
reset: DigitalInOut,
frequency: int,
*,
| Library uses short argument names
`pylint` suggests using argument names with at least 3 letters. This library uses argument names of shorter length, and while these warnings have been disabled for now, they should be considered for renaming. This may require the rework of Learn Guides and other references to code snippets.
| 2022-11-09T19:25:01 | 0.0 | [] | [] |
|||
OwenTruong/civitdl | OwenTruong__civitdl-91 | df472fe9e211cd01ff37a6b321246deb139b00e3 | diff --git a/pyproject.toml b/pyproject.toml
index 2671b0e..d4a3794 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -5,7 +5,7 @@ build-backend = "setuptools.build_meta"
[project]
name = "civitdl"
-version = "2.0.11"
+version = "2.0.12"
authors = [
{ name = "Owen Truong" }
]
diff --git a/src/civitdl/batch/_get_model.py b/src/civitdl/batch/_get_model.py
index eec4c10..c8e25bc 100644
--- a/src/civitdl/batch/_get_model.py
+++ b/src/civitdl/batch/_get_model.py
@@ -180,7 +180,7 @@ def get_version_file():
print_verbose(f'Model Download API URL: {metadata.model_download_url}')
res = batchOptions.session.get(metadata.model_download_url, stream=True)
- if 'reason=download-auth' in res.url:
+ if 'reason=download-auth' in res.url or res.status_code == 401:
api_key_needed = True
if batchOptions.api_key:
headers = {
| 401 errors for some models?
I'm not sure why but some models refuse to download (even with an API key) and return a 401 error, despite being able to download them manually from the website directly.
For example:
```
---------
APIException (API Status Code 401):
Downloading model from CivitAI failed for model id, 282171, and version id, 317633
---------
Retrying to download the current model...
Now downloading "Yellow summer dress"...
- Model ID: 282171
- Version ID: 317633
---------
```
I've tried waiting and retrying, even waiting a whole day just in case it's one of their weird proxy errors, but nada.
| Strange, when I use wget it is able to fetch the model.... I will try to investigate more about this.
`
wget https://civitai.com/api/download/models/317633?token={api_key} --content-disposition
`
Edit:
This python code gives back status 200 too.. strange
`
session = requests.session()
res = session.get(
f"https://civitai.com/api/download/models/317633", stream=True, headers=headers)
print(res.status_code)
` | 2024-01-29T18:49:22 | 0.0 | [] | [] |
||
DIRACGrid/WebAppDIRAC | DIRACGrid__WebAppDIRAC-670 | 4119ee420dc9fd793ed999d524ca9c7c348781f9 | diff --git a/src/WebAppDIRAC/WebApp/handler/ResourceSummaryHandler.py b/src/WebAppDIRAC/WebApp/handler/ResourceSummaryHandler.py
index 87d7e9c24..8de635528 100644
--- a/src/WebAppDIRAC/WebApp/handler/ResourceSummaryHandler.py
+++ b/src/WebAppDIRAC/WebApp/handler/ResourceSummaryHandler.py
@@ -6,20 +6,20 @@
from DIRAC.ResourceStatusSystem.Client.PublisherClient import PublisherClient
from DIRAC.ResourceStatusSystem.PolicySystem.StateMachine import RSSMachine
-from WebAppDIRAC.Lib.WebHandler import WebHandler, asyncGen
+from WebAppDIRAC.Lib.WebHandler import _WebHandler as WebHandler
class SummaryHandlerMix(WebHandler):
- AUTH_PROPS = "all"
+ DEFAULT_AUTHORIZATION = "all"
ELEMENT_TYPE = None
- def _getSelectionData(self) -> dict:
+ def _getSelectionData(self, **kwargs) -> dict:
"""It returns the possible selection data"""
callback = {"name": set(), "elementType": set(), "status": set(), "statusType": set(), "tokenOwner": set()}
pub = PublisherClient()
- gLogger.info("Arguments to web_getSelectionData", repr(self.request.arguments))
+ gLogger.info("Arguments to web_getSelectionData", repr(kwargs))
elementStatuses = pub.getElementStatuses(self.ELEMENT_TYPE, None, None, None, None, None)
if elementStatuses["OK"]:
@@ -62,19 +62,6 @@ def combine(self, elementValues: list) -> dict:
status = "Banned"
reason = "Not usable"
- # if set( [ 'Unknown','Active', 'Degraded' ] ) & set( statusSet ):
- # for upStatus in [ 'Active', 'Degraded' ]:
- # if upStatus in statusSet:
- # status = upStatus
- # reason = '%d %s' % ( statuses.count( upStatus ), upStatus )
- # break
- # else:
- # for downStatus in [ 'Unknown','Probing','Banned','Error' ]:
- # if downStatus in statusSet:
- # status = downStatus
- # reason = '%d %s' % ( statuses.count( downStatus ), downStatus )
- # break
-
# Make a copy
combined = {}
combined.update(elementValues[0])
@@ -88,15 +75,12 @@ def combine(self, elementValues: list) -> dict:
return combined
- def _expand(self) -> dict:
+ def _expand(self, name) -> dict:
"""This method handles the POST requests"""
+ if name:
+ name = list(json.loads(name))
- requestParams = self._requestParams()
- gLogger.info(requestParams)
-
- pub = PublisherClient()
-
- elements = pub.getElementStatuses(self.ELEMENT_TYPE, requestParams["name"], None, None, None, None)
+ elements = PublisherClient().getElementStatuses(self.ELEMENT_TYPE, name, None, None, None, None)
if not elements["OK"]:
return {"success": "false", "error": elements["Message"]}
@@ -108,79 +92,65 @@ def _expand(self) -> dict:
return {"success": "true", "result": elementList, "total": len(elementList)}
- def _action(self) -> dict:
+ def _action(self, action, **kwargs) -> dict:
"""Do action requested from the web portal."""
- requestParams = self._requestParams()
- if not requestParams.get("action"):
+ if not (methodName := action):
return {"success": "false", "error": "Missing action"}
- # pylint does not understand the action entry is not None any more
- actionName = requestParams["action"][0] # pylint: disable=unsubscriptable-object
-
- methodName = actionName
- if not actionName.startswith("set"):
- methodName = f"_get{actionName}"
+ if not action.startswith("set"):
+ methodName = f"_get{action}"
- try:
- return getattr(self, methodName)(requestParams)
- except AttributeError:
- return {"success": "false", "error": f"bad action {actionName}"}
+ if not hasattr(self, methodName):
+ return {"success": "false", "error": f"bad action {action}"}
+ return getattr(self, methodName)(kwargs)
def __checkAuth(self, username: str):
"""Check user permissions
- :param username: user name
-
:return: None if all OK else error message
"""
- if username == "anonymous":
+ if username.lower() == "anonymous":
return "Cannot perform this operation as anonymous"
if "SiteManager" not in self.getProperties():
return "Not authorized"
def setToken(self, requestParams: dict) -> dict:
- """Set token
-
- :param requestParams: request parameters
- """
+ """Set token"""
if error := self.__checkAuth(username := self.getUserName()):
return {"success": "false", "error": error}
- res = PublisherClient().setToken(
+ result = PublisherClient().setToken(
self.ELEMENT_TYPE,
- str(requestParams["name"][0]),
- str(requestParams["statusType"][0]),
- str(requestParams["status"][0]),
- str(requestParams["elementType"][0]),
+ requestParams["name"],
+ requestParams["statusType"],
+ requestParams["status"],
+ requestParams["elementType"],
username,
- str(requestParams["lastCheckTime"][0]),
+ requestParams["lastCheckTime"],
)
- if not res["OK"]:
- return {"success": "false", "error": res["Message"]}
- return {"success": "true", "result": res["Value"]}
+ if not result["OK"]:
+ return {"success": "false", "error": result["Message"]}
+ return {"success": "true", "result": result["Value"]}
def setStatus(self, requestParams: dict) -> dict:
- """Set token
-
- :param requestParams: request parameters
- """
+ """Set token"""
if error := self.__checkAuth(username := self.getUserName()):
return {"success": "false", "error": error}
- res = PublisherClient().setStatus(
+ result = PublisherClient().setStatus(
self.ELEMENT_TYPE,
- str(requestParams["name"][0]),
- str(requestParams["statusType"][0]),
- str(requestParams["status"][0]),
- str(requestParams["elementType"][0]),
+ requestParams["name"],
+ requestParams["statusType"],
+ requestParams["status"],
+ requestParams["elementType"],
username,
- str(requestParams["lastCheckTime"][0]),
+ requestParams["lastCheckTime"],
)
- if not res["OK"]:
- return {"success": "false", "error": res["Message"]}
- return {"success": "true", "result": res["Value"]}
+ if not result["OK"]:
+ return {"success": "false", "error": result["Message"]}
+ return {"success": "true", "result": result["Value"]}
def _checkParameters(self, requestParams: dict, parametersToCheck: list):
"""Check incoming parameters
@@ -195,91 +165,74 @@ def _checkParameters(self, requestParams: dict, parametersToCheck: list):
return f"Missing {fieldName}"
def _getHistory(self, requestParams: dict) -> dict:
- """Get history
-
- :param requestParams: request parameters
- """
+ """Get history"""
if error := self._checkParameters(requestParams, ["name", "elementType", "statusType"]):
return {"success": "false", "error": error}
- res = PublisherClient().getElementHistory(
- self.ELEMENT_TYPE,
- requestParams["name"],
- requestParams["elementType"],
- requestParams["statusType"],
+ result = PublisherClient().getElementHistory(
+ self.ELEMENT_TYPE, requestParams["name"], requestParams["elementType"], requestParams["statusType"]
)
-
- if not res["OK"]:
- gLogger.error(res["Message"])
+ if not result["OK"]:
+ gLogger.error(result["Message"])
return {"success": "false", "error": "error getting history"}
- history = [[r[0], str(r[1]), r[2]] for r in res["Value"]]
+ history = [[r[0], str(r[1]), r[2]] for r in result["Value"]]
return {"success": "true", "result": history, "total": len(history)}
def _getPolicies(self, requestParams: dict) -> dict:
- """Get policies
-
- :param requestParams: request parameters
- """
+ """Get policies"""
if error := self._checkParameters(requestParams, ["name", "statusType"]):
return {"success": "false", "error": error}
- res = PublisherClient().getElementPolicies(
+ result = PublisherClient().getElementPolicies(
self.ELEMENT_TYPE, requestParams["name"], requestParams["statusType"]
)
-
- if not res["OK"]:
- gLogger.error(res["Message"])
+ if not result["OK"]:
+ gLogger.error(result["Message"])
return {"success": "false", "error": "error getting policies"}
- policies = [[r[0], r[1], str(r[2]), str(r[3]), r[4]] for r in res["Value"]]
+ policies = [[r[0], r[1], str(r[2]), str(r[3]), r[4]] for r in result["Value"]]
return {"success": "true", "result": policies, "total": len(policies)}
def _getDowntime(self, requestParams: dict) -> dict:
- """Get downtime
-
- :param requestParams: request parameters
- """
+ """Get downtime"""
if error := self._checkParameters(requestParams, ["name", "element", "elementType", "statusType"]):
return {"success": "false", "error": error}
- res = PublisherClient().getDowntimes(
- str(requestParams["element"][-1]),
- str(requestParams["elementType"][-1]),
- str(requestParams["name"][-1]),
+ result = PublisherClient().getDowntimes(
+ requestParams["element"],
+ requestParams["elementType"],
+ requestParams["name"],
)
- if not res["OK"]:
- gLogger.error(res["Message"])
+ if not result["OK"]:
+ gLogger.error(result["Message"])
return {"success": "false", "error": "error getting downtimes"}
- downtimes = [[str(dt[0]), str(dt[1]), dt[2], dt[3], dt[4]] for dt in res["Value"]]
+ downtimes = [[str(dt[0]), str(dt[1]), dt[2], dt[3], dt[4]] for dt in result["Value"]]
return {"success": "true", "result": downtimes, "total": len(downtimes)}
def _getTimeline(self, requestParams: dict) -> dict:
- """Get timeline
-
- :param requestParams: request parameters
- """
+ """Get timeline"""
if error := self._checkParameters(requestParams, ["name", "elementType", "statusType"]):
return {"success": "false", "error": error}
- res = PublisherClient().getElementHistory(
+ result = PublisherClient().getElementHistory(
self.ELEMENT_TYPE,
- str(requestParams["name"][-1]),
- str(requestParams["elementType"][-1]),
- str(requestParams["statusType"][-1]),
+ requestParams["name"],
+ requestParams["elementType"],
+ requestParams["statusType"],
)
- if not res["OK"]:
- gLogger.error(res["Message"])
+ if not result["OK"]:
+ gLogger.error(result["Message"])
return {"success": "false", "error": "error getting history"}
history = []
- for status, dateEffective, reason in res["Value"]:
+ for status, dateEffective, reason in result["Value"]:
# history.append( [ history[ -1 ][ 0 ], str( dateEffective - timedelta( seconds = 1 ) ), '' ] )
@@ -288,34 +241,30 @@ def _getTimeline(self, requestParams: dict) -> dict:
return {"success": "true", "result": history, "total": len(history)}
def _getTree(self, requestParams: dict) -> dict:
- """Get timeline
-
- :param requestParams: request parameters
- """
+ """Get timeline"""
if error := self._checkParameters(requestParams, ["name", "elementType", "statusType"]):
return {"success": "false", "error": error}
- pub = PublisherClient()
- res = PublisherClient().getTree(str(requestParams["elementType"][-1]), str(requestParams["name"][-1]))
- if not res["OK"]:
- gLogger.error(res["Message"])
+ result = PublisherClient().getTree(requestParams["elementType"], requestParams["name"])
+ if not result["OK"]:
+ gLogger.error(result["Message"])
return {"success": "false", "error": "error getting tree"}
- res = res["Value"]
+ data = result["Value"]
- siteName = list(res)[0]
+ siteName = list(data)[0]
tree = [[siteName, None, None, None]]
- for k, v in res[siteName]["statusTypes"].items():
+ for k, v in data[siteName]["statusTypes"].items():
tree.append([None, k, v, siteName])
tree.append(["ces", None, None, siteName])
- for ce, ceDict in res[siteName]["ces"].items():
+ for ce, ceDict in data[siteName]["ces"].items():
tree.append([ce, None, None, "ces"])
for k, v in ceDict.items():
tree.append([None, k, v, ce])
tree.append(["ses", None, None, siteName])
- for se, seDict in res[siteName]["ses"].items():
+ for se, seDict in data[siteName]["ses"].items():
tree.append([se, None, None, "ses"])
for k, v in seDict.items():
tree.append([None, k, v, se])
@@ -323,102 +272,59 @@ def _getTree(self, requestParams: dict) -> dict:
return {"success": "true", "result": tree, "total": len(tree)}
def _getInfo(self, requestParams: dict) -> dict:
- """Get timeline
-
- :param requestParams: request parameters
- """
+ """Get timeline"""
if error := self._checkParameters(requestParams, ["name", "element", "elementType", "statusType"]):
return {"success": "false", "error": error}
- res = PublisherClient().getElementStatuses(
- str(requestParams["element"][-1]),
- str(requestParams["name"][-1]),
- str(requestParams["elementType"][-1]),
- str(requestParams["statusType"][-1]),
+ result = PublisherClient().getElementStatuses(
+ requestParams["element"],
+ requestParams["name"],
+ requestParams["elementType"],
+ requestParams["statusType"],
None,
None,
)
- if not res["OK"]:
- return {"success": "false", "error": res["Message"]}
- if not res["Value"]:
+ if not result["OK"]:
+ return {"success": "false", "error": result["Message"]}
+ if not result["Value"]:
return {"success": "false", "error": "Nothing found."}
- columns = res["Columns"]
-
- res = dict(zip(columns, res["Value"][0]))
- res["DateEffective"] = str(res["DateEffective"])
- res["LastCheckTime"] = str(res["LastCheckTime"])
- res["TokenExpiration"] = str(res["TokenExpiration"])
-
- return {"success": "true", "result": res, "total": len(res)}
+ columns = result["Columns"]
- def _requestParams(self) -> dict:
- """
- We receive the request and we parse it, in this case, we are doing nothing,
- but it can be certainly more complex.
- """
- gLogger.always("!!! PARAMS: ", repr(self.request.arguments))
+ data = dict(zip(columns, result["Value"][0]))
+ data["DateEffective"] = str(data["DateEffective"])
+ data["LastCheckTime"] = str(data["LastCheckTime"])
+ data["TokenExpiration"] = str(data["TokenExpiration"])
- responseParams = {
- "element": None,
- "name": None,
- "elementType": None,
- "statusType": None,
- "status": None,
- "tokenOwner": None,
- "lastCheckTime": None,
- "action": None,
- }
-
- for key in responseParams:
- value = self.get_argument(key, "")
- if value:
- responseParams[key] = list(json.loads(value))
-
- return responseParams
+ return {"success": "true", "result": data, "total": len(data)}
class ResourceSummaryHandler(SummaryHandlerMix):
ELEMENT_TYPE = "Resource"
- @asyncGen
def web_getSelectionData(self):
- callback = yield self.threadTask(self._getSelectionData)
- self.finish(callback)
+ return self._getSelectionData()
- @asyncGen
- def web_expand(self):
- callback = yield self.threadTask(self._expand)
- self.finish(callback)
+ def web_expand(self, name=None):
+ return self._expand(name)
- @asyncGen
- def web_action(self):
- callback = yield self.threadTask(self._action)
- self.finish(callback)
+ def web_action(self, action=None, **kwargs):
+ return self._action(action, **kwargs)
- @asyncGen
- def web_getResourceSummaryData(self):
+ def web_getResourceSummaryData(self, name=None, status=None, elementType=None, statusType=None, tokenOwner=None):
"""This method returns the data required to fill the grid."""
-
- requestParams = self._requestParams()
- gLogger.info(requestParams)
-
- pub = PublisherClient()
-
- elementStatuses = yield self.threadTask(
- pub.getElementStatuses,
+ elementStatuses = PublisherClient().getElementStatuses(
self.ELEMENT_TYPE,
- requestParams["name"],
- requestParams["elementType"],
- requestParams["statusType"],
- requestParams["status"],
- requestParams["tokenOwner"],
+ name,
+ elementType,
+ statusType,
+ status,
+ tokenOwner,
)
if not elementStatuses["OK"]:
- self.finish({"success": "false", "error": elementStatuses["Message"]})
- return
+ return {"success": "false", "error": elementStatuses["Message"]}
elementTree = collections.defaultdict(list)
@@ -439,13 +345,10 @@ def web_getResourceSummaryData(self):
if len(elementValues) == 1:
elementList.append(elementValues[0])
else:
-
elementList.append(self.combine(elementValues))
- rssMachine = RSSMachine(None)
-
- yield self.threadTask(rssMachine.orderPolicyResults, elementList)
+ RSSMachine(None).orderPolicyResults(elementList)
timestamp = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M [UTC]")
- self.finish({"success": "true", "result": elementList, "total": len(elementList), "date": timestamp})
+ return {"success": "true", "result": elementList, "total": len(elementList), "date": timestamp}
diff --git a/src/WebAppDIRAC/WebApp/handler/SiteSummaryHandler.py b/src/WebAppDIRAC/WebApp/handler/SiteSummaryHandler.py
index cf479498c..a35a19e5d 100644
--- a/src/WebAppDIRAC/WebApp/handler/SiteSummaryHandler.py
+++ b/src/WebAppDIRAC/WebApp/handler/SiteSummaryHandler.py
@@ -9,7 +9,6 @@
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getSiteCEMapping, getGOCSiteName, getDIRACSiteName
from DIRAC.ResourceStatusSystem.Client.PublisherClient import PublisherClient
-from WebAppDIRAC.Lib.WebHandler import asyncGen
from WebAppDIRAC.WebApp.handler.ResourceSummaryHandler import SummaryHandlerMix
@@ -17,97 +16,79 @@ class SiteSummaryHandler(SummaryHandlerMix):
ELEMENT_TYPE = "Site"
- @asyncGen
- def web_getSelectionData(self):
- callback = yield self.threadTask(self._getSelectionData)
- self.finish(callback)
+ def web_getSelectionData(self, **kwargs):
+ return self._getSelectionData(**kwargs)
- @asyncGen
- def web_expand(self):
- callback = yield self.threadTask(self._expand)
- self.finish(callback)
+ def web_expand(self, name=None):
+ return self._expand(name)
- @asyncGen
- def web_action(self):
- callback = yield self.threadTask(self._action)
- self.finish(callback)
+ def web_action(self, action=None, **kwargs):
+ return self._action(action, **kwargs)
- @asyncGen
- def web_getSiteSummaryData(self):
+ def web_getSiteSummaryData(
+ self, name=None, status=None, action=None, elementType=None, statusType=None, tokenOwner=None
+ ):
"""This method returns the data required to fill the grid."""
- requestParams = self.__requestParams()
- gLogger.info(requestParams)
-
- elementStatuses = yield self.threadTask(
- PublisherClient().getElementStatuses,
- "Site",
- requestParams["name"],
- requestParams["elementType"],
- requestParams["statusType"],
- requestParams["status"],
- requestParams["tokenOwner"],
+ if name:
+ name = list(json.loads(name))
+ if status:
+ status = list(json.loads(status))
+ if action:
+ action = list(json.loads(action))
+ if elementType:
+ elementType = list(json.loads(elementType))
+ if statusType:
+ statusType = list(json.loads(statusType))
+ if tokenOwner:
+ tokenOwner = list(json.loads(tokenOwner))
+
+ elementStatuses = PublisherClient().getElementStatuses(
+ "Site", name, elementType, statusType, status, tokenOwner
)
if not elementStatuses["OK"]:
- self.finish({"success": "false", "error": elementStatuses["Message"]})
- return
+ return {"success": "false", "error": elementStatuses["Message"]}
elementList = [dict(zip(elementStatuses["Columns"], site)) for site in elementStatuses["Value"]]
for elementStatus in elementList:
-
elementStatus["Country"] = elementStatus["Name"][-2:]
elementStatus["DateEffective"] = str(elementStatus["DateEffective"])
elementStatus["LastCheckTime"] = str(elementStatus["LastCheckTime"])
elementStatus["TokenExpiration"] = str(elementStatus["TokenExpiration"])
- result = {"success": "true", "result": elementList, "total": len(elementList)}
-
- self.finish(result)
+ return {"success": "true", "result": elementList, "total": len(elementList)}
def _getInfo(self, requestParams: dict) -> dict:
- """Get site info
-
- :param requestParams: request parameters
- """
-
- gLogger.info(requestParams)
-
- if not requestParams["name"]:
+ """Get site info"""
+ if not (name := requestParams.get("name")):
gLogger.warn("No name given")
return {"success": "false", "error": "We need a Site Name to generate an Overview"}
- elementName = requestParams["name"][0]
-
- elementStatuses = PublisherClient().getElementStatuses("Site", str(elementName), None, "all", None, None)
+ elementStatuses = PublisherClient().getElementStatuses("Site", name, None, "all", None, None)
if not elementStatuses["OK"]:
gLogger.error(elementStatuses["Message"])
return {"success": "false", "error": "Error getting ElementStatus information"}
if not elementStatuses["Value"]:
- gLogger.error('element "%s" not found' % elementName)
- return {"success": "false", "error": 'element "%s" not found' % elementName}
+ gLogger.error(f'element "{name}" not found')
+ return {"success": "false", "error": f'element "{name}" not found'}
elementStatus = [dict(zip(elementStatuses["Columns"], element)) for element in elementStatuses["Value"]][0]
elementStatus["DateEffective"] = str(elementStatus["DateEffective"])
elementStatus["LastCheckTime"] = str(elementStatus["LastCheckTime"])
elementStatus["TokenExpiration"] = str(elementStatus["TokenExpiration"])
- gocdb_name = getGOCSiteName(elementName)
- if not gocdb_name["OK"]:
+ if not (gocdb_name := getGOCSiteName(name))["OK"]:
gLogger.error(gocdb_name["Message"])
elementStatus["GOCDB"] = ""
gocdb_name = ""
else:
gocdb_name = gocdb_name["Value"]
elementStatus["GOCDB"] = '<a href="https://goc.egi.eu/portal/index.php?Page_'
- elementStatus["GOCDB"] += 'Type=Submit_Search&SearchString=%s" target="_blank">%s</a>' % (
- gocdb_name,
- gocdb_name,
- )
+ elementStatus["GOCDB"] += f'Type=Submit_Search&SearchString={gocdb_name}" target="_blank">{gocdb_name}</a>'
- dirac_names = getDIRACSiteName(gocdb_name)
- if not dirac_names["OK"]:
+ if not (dirac_names := getDIRACSiteName(gocdb_name))["OK"]:
gLogger.error(dirac_names["Message"])
dirac_names = []
else:
@@ -155,75 +136,58 @@ def _getInfo(self, requestParams: dict) -> dict:
return {"success": "true", "result": elementStatus, "total": len(elementStatus)}
def _getStorages(self, requestParams: dict) -> dict:
- """Get storages
-
- :param requestParams: request parameters
- """
- if not requestParams["name"]:
+ """Get storages"""
+ if not (name := requestParams.get("name")):
gLogger.warn("No name given")
return {"success": "false", "error": "We need a Site Name to generate an Overview"}
- elementName = requestParams["name"][0]
- retVal = getSEsForSite(elementName)
- if not retVal["OK"]:
- return {"success": "false", "error": retVal["Message"]}
- storageElements = retVal["Value"]
+ if not (result := getSEsForSite(name))["OK"]:
+ return {"success": "false", "error": result["Message"]}
+ storageElements = result["Value"]
storageElementsStatus = []
- gLogger.info("storageElements = " + str(storageElements))
+ gLogger.info(f"storageElements = {storageElements}")
# FIXME: use properly RSS
for se in storageElements:
- sestatuses = PublisherClient().getElementStatuses("Resource", se, None, None, None, None)
-
- for sestatus in sestatuses["Value"]:
+ result = PublisherClient().getElementStatuses("Resource", se, None, None, None, None)
+ for sestatus in result["Value"]:
storageElementsStatus.append([sestatus[0], sestatus[1], sestatus[2], sestatus[6]])
return {"success": "true", "result": storageElementsStatus, "total": len(storageElementsStatus)}
def _getComputingElements(self, requestParams: dict) -> dict:
- """Get computing elements
-
- :param requestParams: request parameters
- """
- if not requestParams["name"]:
+ """Get computing elements"""
+ if not (name := requestParams.get("name")):
gLogger.warn("No name given")
return {"success": "false", "error": "We need a Site Name to generate an Overview"}
- elementName = requestParams["name"][0]
-
- res = getSiteCEMapping()
- if not res["OK"]:
- return {"success": "false", "error": res["Message"]}
- computing_elements = res["Value"][elementName]
+ if not (result := getSiteCEMapping())["OK"]:
+ return {"success": "false", "error": result["Message"]}
+ computing_elements = result["Value"][name]
computing_elements_status = []
- gLogger.info("computing_elements = " + str(computing_elements))
+ gLogger.info(f"computing_elements = {computing_elements}")
for ce in computing_elements:
- cestatuses = PublisherClient().getElementStatuses("Resource", ce, None, "all", None, None)
- gLogger.info("cestatus = " + str(cestatuses))
+ result = PublisherClient().getElementStatuses("Resource", ce, None, "all", None, None)
+ gLogger.info(f"cestatus = {result}")
- for cestatus in cestatuses["Value"]:
+ for cestatus in result["Value"]:
computing_elements_status.append([cestatus[0], cestatus[1], cestatus[2], cestatus[6]])
return {"success": "true", "result": computing_elements_status, "total": len(computing_elements_status)}
def _getImages(self, requestParams: dict) -> dict:
- """Get images
-
- :param requestParams: request parameters
- """
- if not requestParams["name"]:
+ """Get images"""
+ if not (name := requestParams.get("name")):
gLogger.warn("No name given")
return {"success": "false", "error": "We need a Site Name to generate an Overview"}
- elementName = requestParams["name"][0]
-
- elementStatuses = PublisherClient().getElementStatuses("Site", str(elementName), None, "all", None, None)
+ elementStatuses = PublisherClient().getElementStatuses("Site", name, None, "all", None, None)
if not elementStatuses["Value"]:
- gLogger.error('element "%s" not found' % elementName)
- return {"success": "false", "error": 'element "%s" not found' % elementName}
+ gLogger.error(f'element "{name}" not found')
+ return {"success": "false", "error": f'element "{name}" not found'}
elementStatus = [dict(zip(elementStatuses["Columns"], element)) for element in elementStatuses["Value"]][0]
@@ -296,27 +260,3 @@ def getPlotDict(
plotDict["condDict"]["Status"] = [status]
return plotDict
-
- def __requestParams(self) -> dict:
- """
- We receive the request and we parse it, in this case, we are doing nothing,
- but it can be certainly more complex.
- """
-
- gLogger.always("!!! PARAMS: ", repr(self.request.arguments))
-
- responseParams = {
- "name": None,
- "elementType": None,
- "statusType": None,
- "status": None,
- "action": None,
- "tokenOwner": None,
- }
-
- for key in responseParams:
- value = self.get_argument(key, None)
- if value:
- responseParams[key] = list(json.loads(value))
-
- return responseParams
diff --git a/src/WebAppDIRAC/WebApp/static/DIRAC/ResourceSummary/classes/OverviewPanel.js b/src/WebAppDIRAC/WebApp/static/DIRAC/ResourceSummary/classes/OverviewPanel.js
index c0abb651f..f95c8b428 100644
--- a/src/WebAppDIRAC/WebApp/static/DIRAC/ResourceSummary/classes/OverviewPanel.js
+++ b/src/WebAppDIRAC/WebApp/static/DIRAC/ResourceSummary/classes/OverviewPanel.js
@@ -352,11 +352,11 @@ Ext.define("DIRAC.ResourceSummary.classes.OverviewPanel", {
url: GLOBAL.BASE_URL + me.applicationName + "/action",
method: "POST",
params: {
- action: Ext.JSON.encode(["Info"]),
- name: Ext.JSON.encode([selection.name]),
- elementType: Ext.JSON.encode([selection.elementType]),
- statusType: Ext.JSON.encode([selection.statusType]),
- element: selection.element ? Ext.JSON.encode([selection.element]) : Ext.JSON.encode(["Resource"]),
+ action: "Info",
+ name: selection.name,
+ elementType: selection.elementType,
+ statusType: selection.statusType,
+ element: selection.element ? selection.element : "Resource",
},
scope: me,
failure: function (response) {
@@ -381,10 +381,10 @@ Ext.define("DIRAC.ResourceSummary.classes.OverviewPanel", {
url: GLOBAL.BASE_URL + me.applicationName + "/action",
method: "POST",
params: {
- action: Ext.JSON.encode(["History"]),
- name: Ext.JSON.encode([selection.name]),
- elementType: Ext.JSON.encode([selection.elementType]),
- statusType: Ext.JSON.encode([selection.statusType]),
+ action: "History",
+ name: selection.name,
+ elementType: selection.elementType,
+ statusType: selection.statusType,
},
scope: me,
failure: function (response) {
@@ -408,11 +408,11 @@ Ext.define("DIRAC.ResourceSummary.classes.OverviewPanel", {
url: GLOBAL.BASE_URL + me.applicationName + "/action",
method: "POST",
params: {
- action: Ext.JSON.encode(["Downtime"]),
- name: Ext.JSON.encode([selection.name]),
- elementType: Ext.JSON.encode([selection.elementType]),
- element: Ext.JSON.encode(["Resource"]),
- statusType: Ext.JSON.encode([selection.statusType]),
+ action: "Downtime",
+ name: selection.name,
+ elementType: selection.elementType,
+ element: "Resource",
+ statusType: selection.statusType,
},
scope: me,
failure: function (response) {
@@ -436,10 +436,10 @@ Ext.define("DIRAC.ResourceSummary.classes.OverviewPanel", {
url: GLOBAL.BASE_URL + me.applicationName + "/action",
method: "POST",
params: {
- action: Ext.JSON.encode(["Policies"]),
- name: Ext.JSON.encode([selection.name]),
- elementType: Ext.JSON.encode([selection.elementType]),
- statusType: Ext.JSON.encode([selection.statusType]),
+ action: "Policies",
+ name: selection.name,
+ elementType: selection.elementType,
+ statusType: selection.statusType,
},
scope: me,
failure: function (response) {
@@ -463,10 +463,10 @@ Ext.define("DIRAC.ResourceSummary.classes.OverviewPanel", {
url: GLOBAL.BASE_URL + me.applicationName + "/action",
method: "POST",
params: {
- action: Ext.JSON.encode(["Timeline"]),
- name: Ext.JSON.encode([selection.name]),
- elementType: Ext.JSON.encode([selection.elementType]),
- statusType: Ext.JSON.encode([selection.statusType]),
+ action: "Timeline",
+ name: selection.name,
+ elementType: selection.elementType,
+ statusType: selection.statusType,
},
scope: me,
failure: function (response) {
@@ -531,10 +531,10 @@ Ext.define("DIRAC.ResourceSummary.classes.OverviewPanel", {
url: GLOBAL.BASE_URL + me.applicationName + "/action",
method: "POST",
params: {
- action: Ext.JSON.encode(["Tree"]),
- name: Ext.JSON.encode([selection.name]),
- elementType: Ext.JSON.encode([selection.elementType]),
- statusType: Ext.JSON.encode([selection.statusType]),
+ action: "Tree",
+ name: selection.name,
+ elementType: selection.elementType,
+ statusType: selection.statusType,
},
scope: me,
failure: function (response) {
diff --git a/src/WebAppDIRAC/WebApp/static/DIRAC/ResourceSummary/classes/ResourceSummary.js b/src/WebAppDIRAC/WebApp/static/DIRAC/ResourceSummary/classes/ResourceSummary.js
index b812592f0..6276d43f5 100644
--- a/src/WebAppDIRAC/WebApp/static/DIRAC/ResourceSummary/classes/ResourceSummary.js
+++ b/src/WebAppDIRAC/WebApp/static/DIRAC/ResourceSummary/classes/ResourceSummary.js
@@ -332,7 +332,7 @@ Ext.define("DIRAC.ResourceSummary.classes.ResourceSummary", {
if (Ext.getCmp(targetId + "_grid") == null) {
var params = {
- name: Ext.JSON.encode([record.data.Name]),
+ name: record.data.Name,
};
var oProxy = Ext.create("Ext.dirac.utils.DiracAjaxProxy", {
url: GLOBAL.BASE_URL + "ResourceSummary/expand",
@@ -463,10 +463,10 @@ Ext.define("DIRAC.ResourceSummary.classes.ResourceSummary", {
url: GLOBAL.BASE_URL + me.applicationName + "/action",
method: "POST",
params: {
- action: Ext.JSON.encode([action]),
- name: Ext.JSON.encode([selectedValues.name]),
- elementType: Ext.JSON.encode([selectedValues.elementType]),
- statusType: Ext.JSON.encode([selectedValues.statusType]),
+ action: action,
+ name: selectedValues.name,
+ elementType: selectedValues.elementType,
+ statusType: selectedValues.statusType,
},
scope: me,
failure: function (response) {
@@ -583,12 +583,12 @@ Ext.define("DIRAC.ResourceSummary.classes.ResourceSummary", {
url: GLOBAL.BASE_URL + me.applicationName + "/action",
method: "POST",
params: {
- action: Ext.JSON.encode([action]),
- name: Ext.JSON.encode([selectedValues.name]),
- elementType: Ext.JSON.encode([selectedValues.elementType]),
- statusType: Ext.JSON.encode([selectedValues.statusType]),
- status: Ext.JSON.encode([newStatus]),
- lastCheckTime: Ext.JSON.encode([selectedValues.lastCheckTime]),
+ action: action,
+ name: selectedValues.name,
+ elementType: selectedValues.elementType,
+ statusType: selectedValues.statusType,
+ status: newStatus,
+ lastCheckTime: selectedValues.lastCheckTime,
},
scope: me,
failure: function (response) {
diff --git a/src/WebAppDIRAC/WebApp/static/DIRAC/SiteSummary/classes/OverviewPanel.js b/src/WebAppDIRAC/WebApp/static/DIRAC/SiteSummary/classes/OverviewPanel.js
index 57b6f833b..980ea4150 100644
--- a/src/WebAppDIRAC/WebApp/static/DIRAC/SiteSummary/classes/OverviewPanel.js
+++ b/src/WebAppDIRAC/WebApp/static/DIRAC/SiteSummary/classes/OverviewPanel.js
@@ -474,11 +474,11 @@ Ext.define("DIRAC.SiteSummary.classes.OverviewPanel", {
url: GLOBAL.BASE_URL + me.applicationName + "/action",
method: "POST",
params: {
- action: Ext.JSON.encode(["Info"]),
- name: Ext.JSON.encode([selection.name]),
- elementType: Ext.JSON.encode([selection.elementType]),
- statusType: Ext.JSON.encode([selection.statusType]),
- element: selection.element ? Ext.JSON.encode([selection.element]) : Ext.JSON.encode(["Resource"]),
+ action: "Info",
+ name: selection.name,
+ elementType: selection.elementType,
+ statusType: selection.statusType,
+ element: selection.element ? selection.element : "Resource",
},
scope: me,
failure: function (response) {
@@ -503,8 +503,8 @@ Ext.define("DIRAC.SiteSummary.classes.OverviewPanel", {
url: GLOBAL.BASE_URL + me.applicationName + "/action",
method: "POST",
params: {
- action: Ext.JSON.encode(["ComputingElements"]),
- name: Ext.JSON.encode([selection.name]),
+ action: "ComputingElements",
+ name: selection.name,
},
scope: me,
failure: function (response) {
@@ -528,8 +528,8 @@ Ext.define("DIRAC.SiteSummary.classes.OverviewPanel", {
url: GLOBAL.BASE_URL + me.applicationName + "/action",
method: "POST",
params: {
- action: Ext.JSON.encode(["Storages"]),
- name: Ext.JSON.encode([selection.name]),
+ action: "Storages",
+ name: selection.name,
},
scope: me,
failure: function (response) {
@@ -552,8 +552,8 @@ Ext.define("DIRAC.SiteSummary.classes.OverviewPanel", {
url: GLOBAL.BASE_URL + me.applicationName + "/action",
method: "POST",
params: {
- action: Ext.JSON.encode(["Images"]),
- name: Ext.JSON.encode([selection.name]),
+ action: "Images",
+ name: selection.name,
},
scope: me,
failure: function (response) {
diff --git a/src/WebAppDIRAC/WebApp/static/DIRAC/SiteSummary/classes/SiteSummary.js b/src/WebAppDIRAC/WebApp/static/DIRAC/SiteSummary/classes/SiteSummary.js
index afb5df6e9..3b22115ba 100644
--- a/src/WebAppDIRAC/WebApp/static/DIRAC/SiteSummary/classes/SiteSummary.js
+++ b/src/WebAppDIRAC/WebApp/static/DIRAC/SiteSummary/classes/SiteSummary.js
@@ -360,10 +360,10 @@ Ext.define("DIRAC.SiteSummary.classes.SiteSummary", {
url: GLOBAL.BASE_URL + me.applicationName + "/action",
method: "POST",
params: {
- action: Ext.JSON.encode([action]),
- name: Ext.JSON.encode([selectedValues.name]),
- elementType: Ext.JSON.encode([selectedValues.elementType]),
- statusType: Ext.JSON.encode([selectedValues.statusType]),
+ action: action,
+ name: selectedValues.name,
+ elementType: selectedValues.elementType,
+ statusType: selectedValues.statusType,
},
scope: me,
failure: function (response) {
@@ -480,12 +480,12 @@ Ext.define("DIRAC.SiteSummary.classes.SiteSummary", {
url: GLOBAL.BASE_URL + me.applicationName + "/action",
method: "POST",
params: {
- action: Ext.JSON.encode([action]),
- name: Ext.JSON.encode([selectedValues.name]),
- elementType: Ext.JSON.encode([selectedValues.elementType]),
- statusType: Ext.JSON.encode([selectedValues.statusType]),
- status: Ext.JSON.encode([newStatus]),
- lastCheckTime: Ext.JSON.encode([selectedValues.lastCheckTime]),
+ action: action,
+ name: selectedValues.name,
+ elementType: selectedValues.elementType,
+ statusType: selectedValues.statusType,
+ status: newStatus,
+ lastCheckTime: selectedValues.lastCheckTime,
},
scope: me,
failure: function (response) {
| Replace WebHandler with _WebHandler
Apologies if I haven't found where this is was discussed but it seems that we currently have two implementations of the `WebHandler`:
https://github.com/DIRACGrid/WebAppDIRAC/blob/d70c268ff47fc051ca29fd0415fcaeb0e7587f2a/src/WebAppDIRAC/Lib/WebHandler.py#L161
https://github.com/DIRACGrid/WebAppDIRAC/blob/d70c268ff47fc051ca29fd0415fcaeb0e7587f2a/src/WebAppDIRAC/Lib/WebHandler.py#L401
Most classes seem to be using `_WebHandler` which appears to be a much nicer implementation which delegates every request to a dedicated thread so the `@asyncGen` and ` yield self.threadTask` stuff isn't needed like in:
https://github.com/DIRACGrid/WebAppDIRAC/blob/d70c268ff47fc051ca29fd0415fcaeb0e7587f2a/src/WebAppDIRAC/WebApp/handler/SystemAdministrationHandler.py#L91-L92
https://github.com/DIRACGrid/WebAppDIRAC/blob/d70c268ff47fc051ca29fd0415fcaeb0e7587f2a/src/WebAppDIRAC/WebApp/handler/SystemAdministrationHandler.py#L104
Is there a reason why we need both implementations? I think we should just move the legacy style handlers to the new one and delete the old implementation.
| Yes, you are absolutely right, now the process of switching to `_WebHandler` is underway, as soon as it ends `WebHandler` is simply removed and `_WebHandler` will be renamed to `WebHandler`.
I think it would be good to deal with this before releasing v5.0 so we have a cleaner starting point fot v5.1 (which I have some ideas for).
Quickly checking these are the modules which need to be updated in vanilla DIRAC. I'll try to start working through them now starting from the top and checking them off:
- [x] `src/WebAppDIRAC/WebApp/handler/ConfigurationManagerHandler.py`
- [x] `src/WebAppDIRAC/WebApp/handler/ProxyManagerHandler.py`
- [x] `src/WebAppDIRAC/WebApp/handler/ProxyUploadHandler.py`
- [x] `src/WebAppDIRAC/WebApp/handler/PublicStateManagerHandler.py`
- [x] `src/WebAppDIRAC/WebApp/handler/RegistryManagerHandler.py`
- [x] `src/WebAppDIRAC/WebApp/handler/RequestMonitorHandler.py`
- [x] `src/WebAppDIRAC/WebApp/handler/ResourceSummaryHandler.py`
- [x] `src/WebAppDIRAC/WebApp/handler/SiteSummaryHandler.py`
- [x] `src/WebAppDIRAC/WebApp/handler/SpaceOccupancyHandler.py`
- [x] `src/WebAppDIRAC/WebApp/handler/SystemAdministrationHandler.py`
- [x] `src/WebAppDIRAC/WebApp/handler/TransformationMonitorHandler.py`
- [x] `src/WebAppDIRAC/WebApp/handler/VMDiracHandler.py`
| 2022-06-21T13:57:57 | 0.0 | [] | [] |
||
algorand/pyteal | algorand__pyteal-401 | e5c3101358ac15acaa2d4f67a25f90fd9eee6c09 | diff --git a/pyteal/ast/abi/array_dynamic.py b/pyteal/ast/abi/array_dynamic.py
index edb40a718..41e8e802b 100644
--- a/pyteal/ast/abi/array_dynamic.py
+++ b/pyteal/ast/abi/array_dynamic.py
@@ -60,7 +60,8 @@ def set(
self,
values: Union[Sequence[T], "DynamicArray[T]", ComputedValue["DynamicArray[T]"]],
) -> Expr:
- """Set the ABI dynamic array with one of the following
+ """Set the ABI dynamic array with one of the following:
+
* a sequence of ABI type variables
* or another ABI static array
* or a ComputedType with same TypeSpec
@@ -69,10 +70,15 @@ def set(
from ComputedType to store the internal ABI encoding into this StaticArray.
This function determines if the argument `values` is an ABI dynamic array:
+
* if so:
+
* checks whether `values` is same type as this ABI dynamic array.
+
* stores the encoding of `values`.
+
* if not:
+
* calls the inherited `set` function and stores `values`.
Args:
diff --git a/pyteal/ast/abi/array_static.py b/pyteal/ast/abi/array_static.py
index 51f938228..c88daed22 100644
--- a/pyteal/ast/abi/array_static.py
+++ b/pyteal/ast/abi/array_static.py
@@ -85,6 +85,7 @@ def set(
],
) -> Expr:
"""Set the ABI static array with one of the following:
+
* a sequence of ABI type variables
* or another ABI static array
* or a ComputedType with same TypeSpec
@@ -93,11 +94,17 @@ def set(
from ComputedType to store the internal ABI encoding into this StaticArray.
This function determines if the argument `values` is an ABI static array:
+
* if so:
+
* checks whether `values` is same type as this ABI staic array.
+
* stores the encoding of `values`.
+
* if not:
+
* checks whether static array length matches sequence length.
+
* calls the inherited `set` function and stores `values`.
Args:
diff --git a/pyteal/ir/tealblock.py b/pyteal/ir/tealblock.py
index 5fd2d5a42..7ce6259bc 100644
--- a/pyteal/ir/tealblock.py
+++ b/pyteal/ir/tealblock.py
@@ -255,8 +255,10 @@ def MatchScratchSlotReferences(
A mapping is defined as follows:
* The actual and expected lists must have the same length.
* For every ScratchSlot referenced by either list:
+
* If the slot appears in both lists, it must appear the exact same number of times and at
the exact same indexes in both lists.
+
* If the slot appears only in one list, for each of its appearances in that list, there
must be a ScratchSlot in the other list that appears the exact same number of times
and at the exact same indexes.
| Document new ABI features
Closes #367
For viewing the docs changes, there are a few options:
* Checkout this branch locally, activate the virtual environment, cd to the `docs` folder, and run `make html`
* OR, you can download the `pyteal.docset` artifact from github actions and use either [Kapeli's Dash](https://kapeli.com/dash) or [zeal](https://zealdocs.org/) to view them locally.
| 2022-06-17T14:29:58 | 0.0 | [] | [] |
|||
nubank/fklearn | nubank__fklearn-232 | 8fe082356cd0c920b97b0d1cf150af0d9417d978 | diff --git a/src/fklearn/training/classification.py b/src/fklearn/training/classification.py
index 263cad44..a27eaa28 100644
--- a/src/fklearn/training/classification.py
+++ b/src/fklearn/training/classification.py
@@ -620,10 +620,14 @@ def lgbm_classification_learner(df: pd.DataFrame,
import lightgbm as lgbm
+ LGBM_MULTICLASS_OBJECTIVES = {'multiclass', 'softmax', 'multiclassova', 'multiclass_ova', 'ova', 'ovr'}
+
params = extra_params if extra_params else {}
params = assoc(params, "eta", learning_rate)
params = params if "objective" in params else assoc(params, "objective", 'binary')
+ is_multiclass_classification = params["objective"] in LGBM_MULTICLASS_OBJECTIVES
+
weights = df[weight_column].values if weight_column else None
features = features if not encode_extra_cols else expand_features_encoded(df, features)
@@ -637,7 +641,7 @@ def lgbm_classification_learner(df: pd.DataFrame,
callbacks=callbacks)
def p(new_df: pd.DataFrame, apply_shap: bool = False) -> pd.DataFrame:
- if params["objective"] == "multiclass":
+ if is_multiclass_classification:
col_dict = {prediction_column + "_" + str(key): value
for (key, value) in enumerate(bst.predict(new_df[features].values).T)}
else:
@@ -649,7 +653,7 @@ def p(new_df: pd.DataFrame, apply_shap: bool = False) -> pd.DataFrame:
shap_values = explainer.shap_values(new_df[features])
shap_expected_value = explainer.expected_value
- if params["objective"] == "multiclass":
+ if is_multiclass_classification:
shap_values_multiclass = {f"shap_values_{class_index}": list(value)
for (class_index, value) in enumerate(shap_values)}
shap_expected_value_multiclass = {
| `lgbm_classification_learner` failing when using multiclass one-vs-all objective
### Code sample
```python
import pandas as pd
import numpy as np
from fklearn.training.classification import lgbm_classification_learner
# sample df with three classes
sample_df = pd.DataFrame(
{
"a": np.linspace(1, 100, 100),
"b": np.sin(np.linspace(1, 100, 100)),
}
)
sample_df["target_raw"] = sample_df["a"]*sample_df["b"]
sample_df["target"] = np.where((sample_df["a"] < 50)&(sample_df["b"] < 0), 0, np.where(sample_df["target_raw"] < 0, 1, 2))
# train the multiclass lightgbm
p, train_scored, logs = lgbm_classification_learner(num_estimators=10, target="target", features=["a", "b"], extra_params={"objective": "multiclassova", "num_class": 3})(sample_df)
```
### Problem description
The current implementation of `lgbm_classification_learner` breaks when we use the `multiclassova` or any of its aliases as an objective function.
The check in the learner only accounts for `multiclass`, which uses the Softmax objective function, but will give an error for the `multiclassova`.
The previous code will fail with `ValueError: Wrong number of items passed 3, placement implies 1`
### Expected behavior
The function should check if the objective is not only `multiclass` but also `multiclassova` or **any** valid aliases of these objectives to make sure it doesn't break.
### Possible solutions
Change the `lgbm_classification_learner` if condition that checks for `multiclass` to check for all the other multi output objectives as well as its aliases ("softmax", "ova", "ovr")
| 2023-06-28T04:57:32 | 0.0 | [] | [] |
|||
AccordBox/python-webpack-boilerplate | AccordBox__python-webpack-boilerplate-14 | c2ed4b453f080ca2b3cecb96cd778401917d5e16 | diff --git a/webpack_boilerplate/config.py b/webpack_boilerplate/config.py
index 3f2c343..447250e 100644
--- a/webpack_boilerplate/config.py
+++ b/webpack_boilerplate/config.py
@@ -15,6 +15,7 @@ def load_from_django():
user_config = dict(DEFAULT_CONFIG, **getattr(settings, "WEBPACK_LOADER", {}))
user_config["ignores"] = [re.compile(I) for I in user_config["IGNORE"]]
+ user_config["web_framework"] = "django"
return user_config
@@ -30,6 +31,7 @@ def load_from_flask():
user_config = dict(DEFAULT_CONFIG, **current_app.config["WEBPACK_LOADER"])
user_config["ignores"] = [re.compile(I) for I in user_config["IGNORE"]]
+ user_config["web_framework"] = "flask"
return user_config
diff --git a/webpack_boilerplate/loader.py b/webpack_boilerplate/loader.py
index 938e38a..9558aee 100644
--- a/webpack_boilerplate/loader.py
+++ b/webpack_boilerplate/loader.py
@@ -46,7 +46,23 @@ def filter_chunks(self, chunks):
yield chunk
def get_chunk_url(self, chunk):
- return chunk["url"]
+ url = chunk["url"]
+
+ if self.config.get("web_framework", None) == "django":
+ from django.contrib.staticfiles.storage import staticfiles_storage
+ from django.conf import settings
+
+ if url.startswith("http"):
+ # webpack dev server
+ return url
+ else:
+ prefix = settings.STATIC_URL
+ url_without_static_prefix = url[
+ url.startswith(prefix) and len(prefix) :
+ ]
+ return staticfiles_storage.url(url_without_static_prefix)
+ else:
+ return url
def get_bundle(self, bundle_name):
assets = copy.copy(self.get_assets())
| TODO: Render hash-versioned static assets in Django
So it can work better with WhiteNoise.
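An editorial sketch of the idea (not the package's actual API; the helper name is illustrative): strip the `STATIC_URL` prefix from the bundle URL and let Django's staticfiles storage, which WhiteNoise hooks into, resolve the hashed filename.
```python
# Illustrative only; assumes a Django project using a manifest-style
# static files storage (e.g. WhiteNoise) that appends content hashes.
from django.conf import settings
from django.contrib.staticfiles.storage import staticfiles_storage

def resolve_bundle_url(url: str) -> str:
    """Return a hash-versioned URL for a webpack bundle entry."""
    if url.startswith("http"):
        return url  # served by webpack-dev-server, leave untouched
    prefix = settings.STATIC_URL
    name = url[len(prefix):] if url.startswith(prefix) else url
    # staticfiles_storage.url() maps "js/app.js" to "js/app.<hash>.js".
    return staticfiles_storage.url(name)
```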
| 2021-11-15T09:39:02 | 0.0 | [] | [] |
|||
matrix-org/pantalaimon | matrix-org__pantalaimon-173 | 3968c69aa846889970df1372ba9aa54c1c5e4290 | diff --git a/pantalaimon/panctl.py b/pantalaimon/panctl.py
index 1f97fe7..6519d8b 100644
--- a/pantalaimon/panctl.py
+++ b/pantalaimon/panctl.py
@@ -34,7 +34,7 @@
from prompt_toolkit.completion import Completer, Completion, PathCompleter
from prompt_toolkit.document import Document
from prompt_toolkit.patch_stdout import patch_stdout
-from pydbus import SessionBus
+from dasbus.connection import SessionMessageBus
PTK2 = ptk_version.startswith("2.")
@@ -404,8 +404,8 @@ class PanCtl:
commands = list(command_help.keys())
def __attrs_post_init__(self):
- self.bus = SessionBus()
- self.pan_bus = self.bus.get("org.pantalaimon1")
+ self.bus = SessionMessageBus()
+ self.pan_bus = self.bus.get_connection("org.pantalaimon1")
self.ctl = self.pan_bus["org.pantalaimon1.control"]
self.devices = self.pan_bus["org.pantalaimon1.devices"]
diff --git a/pantalaimon/ui.py b/pantalaimon/ui.py
index 813b67e..08e7e50 100644
--- a/pantalaimon/ui.py
+++ b/pantalaimon/ui.py
@@ -17,7 +17,7 @@
UI_ENABLED = (
util.find_spec("gi") is not None
and util.find_spec("gi.repository") is not None
- and util.find_spec("pydbus") is not None
+ and util.find_spec("dasbus") is not None
)
if UI_ENABLED:
@@ -28,8 +28,8 @@
import dbus
import notify2
from gi.repository import GLib
- from pydbus import SessionBus
- from pydbus.generic import signal
+ from dasbus import SessionMessageBus
+ from dasbus.signal import Signal
from dbus.mainloop.glib import DBusGMainLoop
from nio import RoomKeyRequest, RoomKeyRequestCancellation
@@ -123,8 +123,8 @@ class Control:
</node>
"""
- Response = signal()
- UnverifiedDevices = signal()
+ Response = Signal()
+ UnverifiedDevices = Signal()
def __init__(self, queue, server_list, id_counter):
self.queue = queue
@@ -297,13 +297,13 @@ class Devices:
</node>
"""
- VerificationInvite = signal()
- VerificationCancel = signal()
- VerificationString = signal()
- VerificationDone = signal()
+ VerificationInvite = Signal()
+ VerificationCancel = Signal()
+ VerificationString = Signal()
+ VerificationDone = Signal()
- KeyRequest = signal()
- KeyRequestCancel = signal()
+ KeyRequest = Signal()
+ KeyRequestCancel = Signal()
def __init__(self, queue, id_counter):
self.device_list = dict()
@@ -466,8 +466,8 @@ def __attrs_post_init__(self):
self.control_if = Control(self.send_queue, self.server_list, id_counter)
self.device_if = Devices(self.send_queue, id_counter)
- self.bus = SessionBus()
- self.bus.publish("org.pantalaimon1", self.control_if, self.device_if)
+ self.bus = SessionMessageBus()
+ self.bus.publish_object("org.pantalaimon1", self.control_if, self.device_if)
def unverified_notification(self, message):
notification = notify2.Notification(
diff --git a/setup.py b/setup.py
index 46798ba..75e0184 100644
--- a/setup.py
+++ b/setup.py
@@ -35,7 +35,7 @@
"ui": [
"dbus-python >= 1.2, < 1.3",
"PyGObject >= 3.36, < 3.39",
- "pydbus >= 0.6, < 0.7",
+ "dasbus == 1.71",
"notify2 >= 0.3, < 0.4",
]
},
| Replace `pydbus` dependency with a maintained alternative? Maybe `dasbus`?
Since Python has moved to 3.12 I thought I should ask if you (the maintainers of pantalaimon) are interested in replacing the unmaintained `pydbus` dependency with something that is maintained and works with modern versions of Python.
I do not know what packages would be considered a good replacement for `pydbus` to be honest, since I myself don't have much experience with Python-related development. But I thought suggesting something would not hurt the project, and here I am.
Related links:
- https://github.com/LEW21/pydbus
- https://github.com/rhinstaller/dasbus
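As an editorial illustration (not part of the original issue), the rough shape of the session-bus API in each library, using only calls from their public documentation; the service name and object path below are placeholders:
```python
# pydbus (unmaintained):
#   from pydbus import SessionBus
#   bus = SessionBus()
#   proxy = bus.get("org.example.Service")

# dasbus (maintained alternative):
from dasbus.connection import SessionMessageBus

bus = SessionMessageBus()
proxy = bus.get_proxy("org.example.Service", "/org/example/Service")
```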
| The problem is that https://github.com/LEW21/pydbus has been unmaintained for the last five years and is now entering a phase where it hits API breakage in the python stdlib.
For anyone who finds this, I made a fork of the repo on my [gitlab](https://gitlab.com/greenbeast/pantalaimon) and switched over to dasbus. After a bit of testing it seems to work just fine but feel free to open any issues over there.
There's also gdbus that can be imported through PyGI which would avoid adding another dependency.
> For anyone who finds this, I made a fork of the repo on my [gitlab](https://gitlab.com/greenbeast/pantalaimon) and switched over to dasbus. After a bit of testing it seems to work just fine but feel free to open any issues over there.
@chookity-pokk Would it be worth making a PR to this repo for that change?
Sorry for the late reply, @alphapapa, I spoke with @Icy-Thought and they said they were going to have a crack at it. | 2024-10-02T02:16:26 | 0.0 | [] | [] |
||
johnnychen94/jill.py | johnnychen94__jill.py-84 | 36736aa0654a1cc2c7e6d0d0322dc132cc1eb6e2 | diff --git a/jill/config/placeholders.json b/jill/config/placeholders.json
index 0428781..04ec402 100644
--- a/jill/config/placeholders.json
+++ b/jill/config/placeholders.json
@@ -10,7 +10,8 @@
"osarch": {
"win-i686": "win32",
"win-x86_64": "win64",
- "mac-x86_64": "mac64"
+ "mac-x86_64": "mac64",
+ "mac-aarch64": "macaarch64"
},
"osbit": {
"wini686": "win32",
diff --git a/jill/utils/sys_utils.py b/jill/utils/sys_utils.py
index 5087380..749ef86 100644
--- a/jill/utils/sys_utils.py
+++ b/jill/utils/sys_utils.py
@@ -24,7 +24,7 @@ def current_system():
def current_architecture():
arch = platform.machine()
- if arch.lower() == "aarch64":
+ if arch.lower() in ["aarch64", "arm64"]:
return "aarch64"
elif arch.lower() == "armv7l":
return "armv7l"
| Error installing Julia 1.7.0-beta3 on an M1 Mac
On an Apple Mac Mini with M1 CPU, I tried to use Jill to install Julia 1.7.0-beta3. This is with Jill 0.9.5, installed via `pip3`, using Python 3.9 as provided by homebrew. This is what happened:
```
$ python3 Library/Python/3.9/bin/jill download 1.7.0-beta3
downloading Julia release for 1.7.0-beta3-mac-arm64
failed to find 1.7.0-beta3-mac-arm64 in available upstreams. Please try it later.
```
The actual archive on the website is <https://julialang-s3.julialang.org/bin/mac/aarch64/1.7/julia-1.7.0-beta3-macaarch64.dmg>
I wonder if the mismatch `arm64` versus `aarch64` (resp. `mac-arm64` versus `macaarch64`) might be related?
| Thanks for trying this!
The error you see is duplicated to #82 and is largely a network issue.
The M1 part is not supported by jill yet because I don't have access to it. Currently, it will unconditionally point you to the x86_64 version.
Would you mind telling me what the output of the following Python code is on your M1 machine?
```python
import platform
platform.machine()
platform.system()
platform.libc_ver()
```
I could possibly use this information to point M1 machines to the aarch64 binaries automatically.
```
$ python3
Python 3.9.6 (default, Jun 28 2021, 19:24:41)
[Clang 12.0.5 (clang-1205.0.22.9)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> import platform
>>> platform.machine()
'arm64'
>>> platform.system()
'Darwin'
>>> platform.libc_ver()
('', '')
```
I am not so sure this is related to #82. The download fails no matter how often I try:
```
munk:~$ python3 Library/Python/3.9/bin/jill install 1.7.0-beta3
JILL - Julia Installer 4 Linux (MacOS, Windows and FreeBSD) -- Light
jill will:
1) install Julia 1.7.0-beta3 for mac-arm64 into /Applications
2) make symlinks in /Users/horn/.local/bin
You may need to manually add /Users/horn/.local/bin to PATH
Continue installation? [Y/n] y
----- Download Julia -----
downloading Julia release for 1.7.0-beta3-mac-arm64
failed to find 1.7.0-beta3-mac-arm64 in available upstreams. Please try it later.
False
munk:~$ python3 Library/Python/3.9/bin/jill install 1.7.0-beta3
JILL - Julia Installer 4 Linux (MacOS, Windows and FreeBSD) -- Light
jill will:
1) install Julia 1.7.0-beta3 for mac-arm64 into /Applications
2) make symlinks in /Users/horn/.local/bin
You may need to manually add /Users/horn/.local/bin to PATH
Continue installation? [Y/n] y
----- Download Julia -----
downloading Julia release for 1.7.0-beta3-mac-arm64
failed to find 1.7.0-beta3-mac-arm64 in available upstreams. Please try it later.
False
```
Oh yes you're right! It's a separate M1 issue! | 2021-07-28T14:11:12 | 0.0 | [] | [] |
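Editorial note (not from the thread): the mismatch comes from `platform.machine()` reporting `arm64` on Apple Silicon while the download URLs use `aarch64`. A minimal sketch of the normalization, mirroring the patch above (the fallback branch is an assumption):
```python
import platform

def normalized_arch() -> str:
    """Map the reported machine type to the name used in Julia download URLs."""
    arch = platform.machine().lower()
    if arch in ("aarch64", "arm64"):  # Linux reports aarch64; macOS on M1 reports arm64
        return "aarch64"
    return arch  # illustrative fallback only

# On an M1 Mac: platform.machine() == "arm64"  ->  normalized_arch() == "aarch64"
```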
||
edanalytics/lightbeam | edanalytics__lightbeam-58 | d36c90360fbc9e130b08eaefb8edbc0bbdae79a7 | diff --git a/lightbeam/validate.py b/lightbeam/validate.py
index 4eacc6f..b09bc9b 100644
--- a/lightbeam/validate.py
+++ b/lightbeam/validate.py
@@ -14,7 +14,6 @@ class Validator:
MAX_VALIDATION_ERRORS_TO_DISPLAY = 10
MAX_VALIDATE_TASK_QUEUE_SIZE = 100
DEFAULT_VALIDATION_METHODS = ["schema", "descriptors", "uniqueness"]
- DEFAULT_FAIL_FAST_THRESHOLD = 10
EDFI_GENERICS_TO_RESOURCES_MAPPING = {
"educationOrganizations": ["localEducationAgencies", "stateEducationAgencies", "schools"],
@@ -35,7 +34,7 @@ def __init__(self, lightbeam=None):
def validate(self):
# The below should go in __init__(), but rely on lightbeam.config which is not yet available there.
- self.fail_fast_threshold = self.lightbeam.config.get("validate",{}).get("references",{}).get("max_failures", self.DEFAULT_FAIL_FAST_THRESHOLD)
+ self.fail_fast_threshold = self.lightbeam.config.get("validate",{}).get("references",{}).get("max_failures", None)
self.validation_methods = self.lightbeam.config.get("validate",{}).get("methods",self.DEFAULT_VALIDATION_METHODS)
if type(self.validation_methods)==str and (self.validation_methods=="*" or self.validation_methods.lower()=='all'):
self.validation_methods = self.DEFAULT_VALIDATION_METHODS
@@ -219,7 +218,7 @@ async def validate_endpoint(self, endpoint):
self.lightbeam.metadata["resources"][endpoint]["records_failed"] = self.lightbeam.num_errors
# implement "fail fast" feature:
- if self.lightbeam.num_errors >= self.fail_fast_threshold:
+ if self.fail_fast_threshold is not None and self.lightbeam.num_errors >= self.fail_fast_threshold:
self.lightbeam.shutdown("validate")
self.logger.critical(f"... STOPPING; found {self.lightbeam.num_errors} >= validate.references.max_failures={self.fail_fast_threshold} VALIDATION ERRORS.")
break
@@ -235,7 +234,7 @@ async def validate_endpoint(self, endpoint):
async def do_validate_payload(self, endpoint, file_name, data, line_counter):
- if self.lightbeam.num_errors >= self.fail_fast_threshold: return
+ if self.fail_fast_threshold is not None and self.lightbeam.num_errors >= self.fail_fast_threshold: return
definition = self.get_swagger_definition_for_endpoint(endpoint)
if "Descriptor" in endpoint:
swagger = self.lightbeam.api.descriptors_swagger
| Validation `max_failures` is turned on by default, against documentation.
Reference validation is not actually optional in Lightbeam. The default value is hard-coded to 10, despite the documentation stating otherwise.
```
validate:
references:
max_failures: 10 # stop testing after X failed payloads ("fail fast")
```
This is optional; if absent, references in every payload are checked, no matter how many fail.
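For illustration (editorial sketch, not Lightbeam's actual code), the documented semantics would treat a missing `max_failures` as "no limit":
```python
# Sketch of the intended behavior: only fail fast when a threshold is configured.
def should_stop(num_errors: int, config: dict) -> bool:
    threshold = (
        config.get("validate", {})
              .get("references", {})
              .get("max_failures")  # None when absent, i.e. no limit
    )
    return threshold is not None and num_errors >= threshold

assert should_stop(10, {"validate": {"references": {"max_failures": 10}}})
assert not should_stop(10_000, {})  # nothing configured -> never stop early
```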
| 2024-10-30T16:40:03 | 0.0 | [] | [] |
|||
DIRACGrid/WebAppDIRAC | DIRACGrid__WebAppDIRAC-710 | d0ab316bbe37159a09e95138543eb33e3d367a10 | diff --git a/src/WebAppDIRAC/Core/App.py b/src/WebAppDIRAC/Core/App.py
index 93b630877..28fd41250 100644
--- a/src/WebAppDIRAC/Core/App.py
+++ b/src/WebAppDIRAC/Core/App.py
@@ -133,7 +133,7 @@ def bootstrap(self):
keyfile=Conf.HTTPSKey(),
cert_reqs=ssl.CERT_OPTIONAL,
ca_certs=Conf.generateCAFile(),
- ssl_version=ssl.PROTOCOL_TLSv1_2,
+ ssl_version="tls",
)
sslprotocol = str(Conf.SSLProtocol())
diff --git a/src/WebAppDIRAC/scripts/dirac_webapp_run.py b/src/WebAppDIRAC/scripts/dirac_webapp_run.py
index b6d2997a4..1dd687d78 100755
--- a/src/WebAppDIRAC/scripts/dirac_webapp_run.py
+++ b/src/WebAppDIRAC/scripts/dirac_webapp_run.py
@@ -2,6 +2,12 @@
import os
import sys
+import tornado.iostream
+
+tornado.iostream.SSLIOStream.configure(
+ "tornado_m2crypto.m2iostream.M2IOStream"
+) # pylint: disable=wrong-import-position
+
from DIRAC import gConfig, S_OK
from DIRAC.Core.Base.Script import Script
from DIRAC.Core.Utilities.Extensions import extensionsByPriority, getExtensionMetadata
| [5.0] Running webapp without using nginx
Leaving as draft as this needs to be thought about carefully.
BEGINRELEASENOTES
FIX: Running webapp without using nginx
ENDRELEASENOTES
| seems ok to me
Well, this is something that we said to "maybe not merge" at BiLD meetings. Opinion?
Why not? And what would the solution be then?
The solution would be "install nginx", at least for production setup. For testing it would be useful...
Anyway, @chrisburr was not sure about the correctness of this one.
I see.
Well, we can definitely recommend having nginx for a production setup, but I can't think of anything wrong with this patch.
Would we be able to put that in the next release? Judging from Luisa's comments, CTA also runs without nginx.
We are currently running off this branch, but it's not a permanent solution, especially if Janusz gets creative :-D | 2022-12-15T09:50:18 | 0.0 | [] | [] |
||
dougthor42/wafer_map | dougthor42__wafer_map-83 | 5900f5d283bfca27ce2726a246ce802c722f67b6 | diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9e52503..a42705d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,7 @@ This document highlights high-level changes made to this program.
## Unreleased
+ Fix x-position of gridlines (#38)
++ The die center dots can now be toggled on and off. (#82)
## 1.1.1 / 2018-10-12
diff --git a/wafer_map/wm_core.py b/wafer_map/wm_core.py
index 8ad24fb..492920a 100644
--- a/wafer_map/wm_core.py
+++ b/wafer_map/wm_core.py
@@ -113,6 +113,7 @@ def __init__(self,
self.crosshairs_bool = True
self.reticle_gridlines_bool = False
self.legend_bool = True
+ self.die_centers = None
# timer to give a delay when moving so that buffers aren't
# re-built too many times.
@@ -140,7 +141,8 @@ def _init_ui(self):
# Draw the die and wafer objects (outline, crosshairs, etc) on the canvas
self.draw_die()
if self.plot_die_centers:
- self.draw_die_center()
+ self.die_centers = self.draw_die_center()
+ self.canvas.AddObject(self.die_centers)
self.draw_wafer_objects()
# Bind events to the canvas
@@ -254,6 +256,7 @@ def draw_die(self):
def draw_die_center(self):
"""Plot the die centers as a small dot."""
+ centers = []
for die in self.xyd:
# Determine the die's lower-left coordinate
lower_left_coord = wm_utils.grid_to_rect_coord(die[:2],
@@ -268,7 +271,9 @@ def draw_die_center(self):
0.5,
FillColor=wm_const.wm_DIE_CENTER_DOT_COLOR,
)
- self.canvas.AddObject(circ)
+ centers.append(circ)
+
+ return FloatCanvas.Group(centers)
def draw_wafer_objects(self):
"""Draw and add the various wafer objects."""
@@ -316,6 +321,19 @@ def toggle_die_gridlines(self):
self.die_gridlines_bool = True
self.canvas.Draw()
+ def toggle_die_centers(self):
+ """Toggle the die centers on and off."""
+ if self.die_centers is None:
+ self.die_centers = self.draw_die_center()
+
+ if self.plot_die_centers:
+ self.canvas.RemoveObject(self.die_centers)
+ self.plot_die_centers = False
+ else:
+ self.canvas.AddObject(self.die_centers)
+ self.plot_die_centers = True
+ self.canvas.Draw()
+
def toggle_legend(self):
"""Toggle the legend on and off."""
if self.legend_bool:
@@ -351,12 +369,14 @@ def _on_key_down(self, event):
O: Toggle wafer outline
C: Toggle wafer crosshairs
L: Toggle the legend
+ D: Toggle die centers
"""
# TODO: Decide if I want to move this to a class attribute
keycodes = {wx.WXK_HOME: self.zoom_fill, # "Home
79: self.toggle_outline, # "O"
67: self.toggle_crosshairs, # "C"
76: self.toggle_legend, # "L"
+ 68: self.toggle_die_centers, # "D"
}
# print("panel event!")
@@ -384,7 +404,8 @@ def on_color_change(self, event):
self.legend.on_color_change(event)
self.draw_die()
if self.plot_die_centers:
- self.draw_die_center()
+ self.die_centers = self.draw_die_center()
+ self.canvas.AddObject(self.die_centers)
self.draw_wafer_objects()
self.canvas.Draw(True)
# self.canvas.Unbind(FloatCanvas.EVT_MOUSEWHEEL)
diff --git a/wafer_map/wm_frame.py b/wafer_map/wm_frame.py
index d9398de..96f794b 100644
--- a/wafer_map/wm_frame.py
+++ b/wafer_map/wm_frame.py
@@ -103,6 +103,7 @@ def _init_ui(self):
# Initialize default states
self.mv_outline.Check()
self.mv_crosshairs.Check()
+ self.mv_diecenters.Check(self.plot_die_centers)
self.mv_legend.Check()
# Set the MenuBar and create a status bar (easy thanks to wx.Frame)
@@ -177,6 +178,12 @@ def _create_menu_items(self):
"Show or hide the wafer outline",
wx.ITEM_CHECK,
)
+ self.mv_diecenters = wx.MenuItem(self.mview,
+ wx.ID_ANY,
+ "Die Centers\tD",
+ "Show or hide the die centers",
+ wx.ITEM_CHECK,
+ )
self.mv_legend = wx.MenuItem(self.mview,
wx.ID_ANY,
"Legend\tL",
@@ -215,6 +222,7 @@ def _add_menu_items(self):
self.mview.AppendSeparator()
self.mview.Append(self.mv_crosshairs)
self.mview.Append(self.mv_outline)
+ self.mview.Append(self.mv_diecenters)
self.mview.Append(self.mv_legend)
self.mopts.Append(self.mo_test)
@@ -233,6 +241,7 @@ def _bind_events(self):
self.Bind(wx.EVT_MENU, self.on_quit, self.mf_close)
self.Bind(wx.EVT_MENU, self.on_zoom_fit, self.mv_zoomfit)
self.Bind(wx.EVT_MENU, self.on_toggle_crosshairs, self.mv_crosshairs)
+ self.Bind(wx.EVT_MENU, self.on_toggle_diecenters, self.mv_diecenters)
self.Bind(wx.EVT_MENU, self.on_toggle_outline, self.mv_outline)
self.Bind(wx.EVT_MENU, self.on_toggle_legend, self.mv_legend)
self.Bind(wx.EVT_MENU, self.on_change_high_color, self.mo_high_color)
@@ -258,6 +267,11 @@ def on_toggle_crosshairs(self, event):
"""Call :meth:`wafer_map.wm_core.WaferMapPanel.toggle_crosshairs()`."""
self.panel.toggle_crosshairs()
+ # TODO: I don't think I need a separate method for this
+ def on_toggle_diecenters(self, event):
+ """Call :meth:`wafer_map.wm_core.WaferMapPanel.toggle_crosshairs()`."""
+ self.panel.toggle_die_centers()
+
# TODO: I don't think I need a separate method for this
def on_toggle_outline(self, event):
"""Call the WaferMapPanel.toggle_outline() method."""
| Make the die centers a toggle.
The die centers should be able to be toggled on and off.
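A generic sketch of one way to implement such a toggle with wxPython's FloatCanvas (editorial addition; it only reuses the `Group`/`AddObject`/`RemoveObject` calls visible in the patch above, and the helper names are hypothetical):
```python
from wx.lib.floatcanvas import FloatCanvas

def build_center_dots(circles):
    """Wrap the individual dot objects in a Group so they toggle as one unit."""
    return FloatCanvas.Group(circles)

def toggle_group(canvas, group, currently_shown):
    """Add or remove the group from the canvas, redraw, and return the new state."""
    if currently_shown:
        canvas.RemoveObject(group)
    else:
        canvas.AddObject(group)
    canvas.Draw()
    return not currently_shown
```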
| 2019-10-11T22:48:19 | 0.0 | [] | [] |
|||
Yubico/yubikey-manager | Yubico__yubikey-manager-454 | bf45dadd33fbe67f6fc0ce57057debced6830ee2 | diff --git a/ykman/cli/otp.py b/ykman/cli/otp.py
index 6a386d41..47bdf16d 100644
--- a/ykman/cli/otp.py
+++ b/ykman/cli/otp.py
@@ -36,7 +36,12 @@
UpdateConfiguration,
)
from yubikit.core import TRANSPORT, CommandError
-from yubikit.core.otp import modhex_encode, modhex_decode, OtpConnection
+from yubikit.core.otp import (
+ MODHEX_ALPHABET,
+ modhex_encode,
+ modhex_decode,
+ OtpConnection,
+)
from .util import (
ykman_group,
@@ -371,8 +376,8 @@ def yubiotp(
try:
public_id = modhex_decode(public_id)
- except KeyError:
- ctx.fail("Invalid public ID, must be modhex.")
+ except ValueError:
+ ctx.fail(f"Invalid public ID, must be modhex ({MODHEX_ALPHABET}).")
if not private_id:
if generate_private_id:
diff --git a/yubikit/core/otp.py b/yubikit/core/otp.py
index f0b4406f..cf13bf9a 100644
--- a/yubikit/core/otp.py
+++ b/yubikit/core/otp.py
@@ -37,6 +37,9 @@
logger = logging.getLogger(__name__)
+MODHEX_ALPHABET = "cbdefghijklnrtuv"
+
+
class CommandRejectedError(CommandError):
"""The issues command was rejected by the YubiKey"""
@@ -70,18 +73,15 @@ def check_crc(data: bytes) -> bool:
return calculate_crc(data) == CRC_OK_RESIDUAL
-_MODHEX = "cbdefghijklnrtuv"
-
-
def modhex_encode(data: bytes) -> str:
"""Encode a bytes-like object using Modhex (modified hexadecimal) encoding."""
- return "".join(_MODHEX[b >> 4] + _MODHEX[b & 0xF] for b in data)
+ return "".join(MODHEX_ALPHABET[b >> 4] + MODHEX_ALPHABET[b & 0xF] for b in data)
def modhex_decode(string: str) -> bytes:
"""Decode the Modhex (modified hexadecimal) string."""
return bytes(
- _MODHEX.index(string[i]) << 4 | _MODHEX.index(string[i + 1])
+ MODHEX_ALPHABET.index(string[i]) << 4 | MODHEX_ALPHABET.index(string[i + 1])
for i in range(0, len(string), 2)
)
| please provide informative error message for illegal MODHEX
- YubiKey Manager (ykman) version: 4.0.0~a1
- How was it installed?: Debian package
- Operating system and version: Debian bullseye
- YubiKey model and version: YubiKey 5 Nano
- Bug description summary:
I wanted to program a yubiotp and supply my own public ID, but I didn't know what MODHEX is, so I supplied an illegal value. The error message seems to point to a programming error. I would expect an error message that explains that the supplied value is illegal.
Steps to reproduce
---
```
ykman otp yubiotp --public-id vvvvvvvvvvvz --generate-private-id --generate-key 2
Error: substring not found
```
Expected result
---
```
ykman otp yubiotp --public-id vvvvvvvvvvvz --generate-private-id --generate-key 2
Error: not a valid MODHEX value (valid chars are c b d e f g h i j k l n r t u v): vvvvvvvvvvvz
```
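For illustration (editorial sketch, not ykman's implementation), validating the input against the modhex alphabet makes such a message straightforward; the alphabet below is the one used by the modhex encoding:
```python
MODHEX_ALPHABET = "cbdefghijklnrtuv"

def check_modhex(value: str) -> None:
    """Raise a readable error if value is not valid modhex of even length."""
    bad = sorted({c for c in value.lower() if c not in MODHEX_ALPHABET})
    if bad or len(value) % 2:
        raise ValueError(
            f"not a valid MODHEX value (valid chars are {' '.join(MODHEX_ALPHABET)}): {value}"
        )

check_modhex("vvvvvvvvvvvv")    # ok
# check_modhex("vvvvvvvvvvvz")  # raises ValueError listing the allowed characters
```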
| 2021-09-17T12:37:12 | 0.0 | [] | [] |
|||
zhenhuaw-me/onnxcli | zhenhuaw-me__onnxcli-18 | 5591efd848f03b454670599c19677889b3138cc4 | diff --git a/onnxcli/draw.py b/onnxcli/draw.py
index 8e828f0..29b0d6a 100644
--- a/onnxcli/draw.py
+++ b/onnxcli/draw.py
@@ -69,28 +69,45 @@ def node_key(name):
m = onnx.load_model(input_path)
dot_str = "digraph onnxcli {\n"
+ # Keep track of the original tensor names.
+ # Many tensors are not recorded in graph.value_info, thus we need to generate edge for them specially.
+ tensor_names = set()
+
# nodes
for node in m.graph.node:
nname = fixname(node.name)
nkey = node_key(node.name)
dot_str += '"{}" [label="{}\\n<{}>" fonstsize=16 shape=oval];\n'.format(nkey, nname, node.op_type)
for iname in node.input:
+ tensor_names.add(iname)
dot_str += ' "{}" -> "{}";\n'.format(tensor_key(iname), nkey)
for oname in node.output:
+ tensor_names.add(oname)
dot_str += ' "{}" -> "{}";\n'.format(nkey, tensor_key(oname))
# tensors
for tensor in m.graph.initializer:
+ tensor_names.remove(tensor.name)
dot_str += '"{}" [label="{}\\n{}, {}" fonstsize=10 style=rounded shape=rectangle];\n'.format(
tensor_key(tensor.name), fixname(tensor.name), dtype(tensor.data_type), tensor.dims
)
- for tensor in m.graph.value_info:
- dot_str += '"{}" [label="{}\\n{}, {}" fonstsize=10 shape=rectangle];\n'.format(
- tensor_key(tensor.name),
- fixname(tensor.name),
- dtype(tensor.type.tensor_type.elem_type),
- shape(tensor.type.tensor_type.shape),
- )
+ all_value_info = list(m.graph.value_info) + list(m.graph.input) + list(m.graph.output)
+ for tensor in all_value_info:
+ if tensor.name in tensor_names:
+ tensor_names.remove(tensor.name)
+ dot_str += '"{}" [label="{}\\n{}, {}" fonstsize=10 shape=rectangle];\n'.format(
+ tensor_key(tensor.name),
+ fixname(tensor.name),
+ dtype(tensor.type.tensor_type.elem_type),
+ shape(tensor.type.tensor_type.shape),
+ )
+
+ if len(tensor_names) != 0:
+ # the tensors that are not in graph.initializer nor graph.value_info
+ # i.e. they only have names
+ logger.warning("There are tensors that only have name in the graph. Suggest to run with `onnx infershape`.")
+ for tname in tensor_names:
+ dot_str += '"{}" [label="{}" fonstsize=10 shape=rectangle];\n'.format(tensor_key(tname), fixname(tname))
dot_str += "}\n"
return dot_str
| Some ONNX models don't list activation tensors in GraphProto.value_info
They should, but they don't. I am not sure why such models behave like this - they cannot pass the ONNX model checker.
There must be something wrong with the exporter. I can try to figure out which exporter has such issues.
For `onnxcli`, any functionality that depends on walking `GraphProto.value_info` may not show the real model. This is not our defect, but the models'. To work around it, you can first run shape inference on the model, which fixes the `GraphProto.value_info` listing issue.
```
onnx infershape /path/to/input/model /path/to/output/model
```
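The same workaround can be applied with ONNX's Python API if the CLI isn't handy (editorial sketch; the paths are the same placeholders as in the command above):
```python
import onnx
from onnx import shape_inference

model = onnx.load("/path/to/input/model")       # placeholder path
inferred = shape_inference.infer_shapes(model)  # populates graph.value_info
onnx.save(inferred, "/path/to/output/model")    # placeholder path
```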
| It's also causing issues for `onnx draw` - the tensors are rendered as ellipses rather than rectangles since we cannot find these tensors in `GraphProto.value_info`.
[The MobileNet from the ONNX Official model zoo](https://github.com/onnx/models/tree/master/vision/classification/mobilenet) is an example. This could be due to the old spec of ONNX. | 2022-01-25T09:18:29 | 0.0 | [] | [] |