repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
airflow | airflow-main/airflow/kubernetes/pod_launcher_deprecated.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Launches pods."""
from __future__ import annotations
import json
import math
import time
import warnings
from datetime import datetime as dt
import pendulum
import tenacity
from kubernetes import client, watch
from kubernetes.client.models.v1_pod import V1Pod
from kubernetes.client.rest import ApiException
from kubernetes.stream import stream as kubernetes_stream
from requests.exceptions import HTTPError
from airflow.exceptions import AirflowException, RemovedInAirflow3Warning
from airflow.kubernetes.kube_client import get_kube_client
from airflow.kubernetes.pod_generator import PodDefaults
from airflow.settings import pod_mutation_hook
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.state import State
warnings.warn(
"""
Please use :mod:`airflow.providers.cncf.kubernetes.utils.pod_manager` instead.
To use that module, install the provider package:
https://pypi.org/project/apache-airflow-providers-cncf-kubernetes/
""",
RemovedInAirflow3Warning,
stacklevel=2,
)
class PodStatus:
"""Status of the pods."""
PENDING = "pending"
RUNNING = "running"
FAILED = "failed"
SUCCEEDED = "succeeded"
class PodLauncher(LoggingMixin):
"""
Deprecated class for launching pods.
Please use airflow.providers.cncf.kubernetes.utils.pod_manager.PodManager instead.
"""
def __init__(
self,
kube_client: client.CoreV1Api = None,
in_cluster: bool = True,
cluster_context: str | None = None,
extract_xcom: bool = False,
):
"""
Deprecated class for launching pods.
Please use airflow.providers.cncf.kubernetes.utils.pod_manager.PodManager
instead to create the launcher.
:param kube_client: kubernetes client
:param in_cluster: whether we are in cluster
:param cluster_context: context of the cluster
:param extract_xcom: whether we should extract xcom
"""
super().__init__()
self._client = kube_client or get_kube_client(in_cluster=in_cluster, cluster_context=cluster_context)
self._watch = watch.Watch()
self.extract_xcom = extract_xcom
def run_pod_async(self, pod: V1Pod, **kwargs):
"""Runs pod asynchronously."""
pod_mutation_hook(pod)
sanitized_pod = self._client.api_client.sanitize_for_serialization(pod)
json_pod = json.dumps(sanitized_pod, indent=2)
self.log.debug("Pod Creation Request: \n%s", json_pod)
try:
resp = self._client.create_namespaced_pod(
body=sanitized_pod, namespace=pod.metadata.namespace, **kwargs
)
self.log.debug("Pod Creation Response: %s", resp)
except Exception as e:
self.log.exception("Exception when attempting to create Namespaced Pod: %s", json_pod)
raise e
return resp
def delete_pod(self, pod: V1Pod):
"""Deletes pod."""
try:
self._client.delete_namespaced_pod(
pod.metadata.name, pod.metadata.namespace, body=client.V1DeleteOptions()
)
except ApiException as e:
# If the pod is already deleted
if e.status != 404:
raise
def start_pod(self, pod: V1Pod, startup_timeout: int = 120):
"""
Launches the pod synchronously and waits for completion.
:param pod: pod to launch
:param startup_timeout: Timeout in seconds for pod startup (if the pod is pending for too long, the task fails)
"""
resp = self.run_pod_async(pod)
curr_time = dt.now()
if resp.status.start_time is None:
while self.pod_not_started(pod):
self.log.warning("Pod not yet started: %s", pod.metadata.name)
delta = dt.now() - curr_time
if delta.total_seconds() >= startup_timeout:
raise AirflowException("Pod took too long to start")
time.sleep(1)
def monitor_pod(self, pod: V1Pod, get_logs: bool) -> tuple[State, str | None]:
"""
Monitors a pod and returns the final state.
:param pod: pod spec that will be monitored
:param get_logs: whether to read the logs locally
"""
if get_logs:
read_logs_since_sec = None
last_log_time = None
while True:
logs = self.read_pod_logs(pod, timestamps=True, since_seconds=read_logs_since_sec)
for line in logs:
timestamp, message = self.parse_log_line(line.decode("utf-8"))
if timestamp:
last_log_time = pendulum.parse(timestamp)
self.log.info(message)
time.sleep(1)
if not self.base_container_is_running(pod):
break
self.log.warning("Pod %s log read interrupted", pod.metadata.name)
if last_log_time:
delta = pendulum.now() - last_log_time
# Prefer logs duplication rather than loss
read_logs_since_sec = math.ceil(delta.total_seconds())
result = None
if self.extract_xcom:
while self.base_container_is_running(pod):
self.log.info("Container %s has state %s", pod.metadata.name, State.RUNNING)
time.sleep(2)
result = self._extract_xcom(pod)
self.log.info(result)
result = json.loads(result)
while self.pod_is_running(pod):
self.log.info("Pod %s has state %s", pod.metadata.name, State.RUNNING)
time.sleep(2)
return self._task_status(self.read_pod(pod)), result
def parse_log_line(self, line: str) -> tuple[str | None, str]:
"""
Parse a Kubernetes log line into its timestamp and message.
:param line: k8s log line
:return: timestamp and log message
"""
split_at = line.find(" ")
if split_at == -1:
self.log.error(
"Error parsing timestamp (no timestamp in message: %r). "
"Will continue execution but won't update timestamp",
line,
)
return None, line
timestamp = line[:split_at]
message = line[split_at + 1 :].rstrip()
return timestamp, message
def _task_status(self, event):
self.log.info("Event: %s had an event of type %s", event.metadata.name, event.status.phase)
status = self.process_status(event.metadata.name, event.status.phase)
return status
def pod_not_started(self, pod: V1Pod):
"""Tests if pod has not started."""
state = self._task_status(self.read_pod(pod))
return state == State.QUEUED
def pod_is_running(self, pod: V1Pod):
"""Tests if pod is running."""
state = self._task_status(self.read_pod(pod))
return state not in (State.SUCCESS, State.FAILED)
def base_container_is_running(self, pod: V1Pod):
"""Tests if base container is running."""
event = self.read_pod(pod)
status = next((s for s in event.status.container_statuses if s.name == "base"), None)
if not status:
return False
return status.state.running is not None
@tenacity.retry(stop=tenacity.stop_after_attempt(3), wait=tenacity.wait_exponential(), reraise=True)
def read_pod_logs(
self,
pod: V1Pod,
tail_lines: int | None = None,
timestamps: bool = False,
since_seconds: int | None = None,
):
"""Reads log from the pod."""
additional_kwargs = {}
if since_seconds:
additional_kwargs["since_seconds"] = since_seconds
if tail_lines:
additional_kwargs["tail_lines"] = tail_lines
try:
return self._client.read_namespaced_pod_log(
name=pod.metadata.name,
namespace=pod.metadata.namespace,
container="base",
follow=True,
timestamps=timestamps,
_preload_content=False,
**additional_kwargs,
)
except HTTPError as e:
raise AirflowException(f"There was an error reading the kubernetes API: {e}")
@tenacity.retry(stop=tenacity.stop_after_attempt(3), wait=tenacity.wait_exponential(), reraise=True)
def read_pod_events(self, pod):
"""Reads events from the pod."""
try:
return self._client.list_namespaced_event(
namespace=pod.metadata.namespace, field_selector=f"involvedObject.name={pod.metadata.name}"
)
except HTTPError as e:
raise AirflowException(f"There was an error reading the kubernetes API: {e}")
@tenacity.retry(stop=tenacity.stop_after_attempt(3), wait=tenacity.wait_exponential(), reraise=True)
def read_pod(self, pod: V1Pod):
"""Read pod information."""
try:
return self._client.read_namespaced_pod(pod.metadata.name, pod.metadata.namespace)
except HTTPError as e:
raise AirflowException(f"There was an error reading the kubernetes API: {e}")
def _extract_xcom(self, pod: V1Pod):
resp = kubernetes_stream(
self._client.connect_get_namespaced_pod_exec,
pod.metadata.name,
pod.metadata.namespace,
container=PodDefaults.SIDECAR_CONTAINER_NAME,
command=["/bin/sh"],
stdin=True,
stdout=True,
stderr=True,
tty=False,
_preload_content=False,
)
try:
result = self._exec_pod_command(resp, f"cat {PodDefaults.XCOM_MOUNT_PATH}/return.json")
self._exec_pod_command(resp, "kill -s SIGINT 1")
finally:
resp.close()
if result is None:
raise AirflowException(f"Failed to extract xcom from pod: {pod.metadata.name}")
return result
def _exec_pod_command(self, resp, command):
if resp.is_open():
self.log.info("Running command... %s\n", command)
resp.write_stdin(command + "\n")
while resp.is_open():
resp.update(timeout=1)
if resp.peek_stdout():
return resp.read_stdout()
if resp.peek_stderr():
self.log.info(resp.read_stderr())
break
return None
def process_status(self, job_id, status):
"""Process status information for the job."""
status = status.lower()
if status == PodStatus.PENDING:
return State.QUEUED
elif status == PodStatus.FAILED:
self.log.error("Event with job id %s Failed", job_id)
return State.FAILED
elif status == PodStatus.SUCCEEDED:
self.log.info("Event with job id %s Succeeded", job_id)
return State.SUCCESS
elif status == PodStatus.RUNNING:
return State.RUNNING
else:
self.log.error("Event: Invalid state %s on job %s", status, job_id)
return State.FAILED
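# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# A minimal, hedged example of driving the deprecated PodLauncher end to end. The pod
# spec, namespace, and image below are made-up placeholder values, and a working
# Kubernetes connection is assumed, since get_kube_client() is called under the hood.
def _example_pod_launcher_usage() -> None:
    from kubernetes.client import models as k8s

    launcher = PodLauncher(in_cluster=False)  # loads kube config from the local environment
    pod = k8s.V1Pod(
        metadata=k8s.V1ObjectMeta(name="demo-pod", namespace="default"),
        spec=k8s.V1PodSpec(
            containers=[k8s.V1Container(name="base", image="alpine:3", command=["echo", "hello"])],
            restart_policy="Never",
        ),
    )
    launcher.start_pod(pod, startup_timeout=120)  # create the pod and wait for it to leave Pending
    state, _xcom = launcher.monitor_pod(pod, get_logs=True)  # stream logs and wait for completion
    launcher.delete_pod(pod)  # clean up regardless of the final state
    assert state in (State.SUCCESS, State.FAILED)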
| 12,009 | 36.41433 | 109 | py |
airflow | airflow-main/airflow/kubernetes/k8s_model.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Classes for interacting with Kubernetes API."""
from __future__ import annotations
from abc import ABC, abstractmethod
from functools import reduce
from kubernetes.client import models as k8s
class K8SModel(ABC):
"""
Airflow Kubernetes models are here for backwards compatibility reasons only.
Ideally clients should use the kubernetes API
and the process of
client input -> Airflow k8s models -> k8s models
can be avoided. All of these models implement the
`attach_to_pod` method so that they integrate with the kubernetes client.
"""
@abstractmethod
def attach_to_pod(self, pod: k8s.V1Pod) -> k8s.V1Pod:
"""
Attaches to pod.
:param pod: A pod to attach this Kubernetes object to
:return: The pod with the object attached
"""
def append_to_pod(pod: k8s.V1Pod, k8s_objects: list[K8SModel] | None):
"""
Attach additional specs to an existing pod object.
:param pod: A pod to attach a list of Kubernetes objects to
:param k8s_objects: a potential None list of K8SModels
:return: pod with the objects attached if they exist
"""
if not k8s_objects:
return pod
return reduce(lambda p, o: o.attach_to_pod(p), k8s_objects, pod)
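# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# A minimal, hedged example of the K8SModel contract: each model knows how to attach
# itself to a V1Pod, and append_to_pod folds a list of such models onto a pod via
# reduce(). The _ExampleAddLabel class below is hypothetical and exists only for
# illustration.
class _ExampleAddLabel(K8SModel):
    def __init__(self, key: str, value: str) -> None:
        self.key = key
        self.value = value

    def attach_to_pod(self, pod: k8s.V1Pod) -> k8s.V1Pod:
        pod.metadata = pod.metadata or k8s.V1ObjectMeta()
        pod.metadata.labels = {**(pod.metadata.labels or {}), self.key: self.value}
        return pod


def _example_append_to_pod() -> None:
    pod = k8s.V1Pod(metadata=k8s.V1ObjectMeta(name="demo"))
    pod = append_to_pod(pod, [_ExampleAddLabel("team", "data"), _ExampleAddLabel("tier", "batch")])
    assert pod.metadata.labels == {"team": "data", "tier": "batch"}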
| 2,045 | 33.1 | 80 | py |
airflow | airflow-main/airflow/kubernetes/volume_mount.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use :mod:`kubernetes.client.models.V1VolumeMount`."""
from __future__ import annotations
import warnings
from airflow.exceptions import RemovedInAirflow3Warning
with warnings.catch_warnings():
warnings.simplefilter("ignore", RemovedInAirflow3Warning)
from airflow.providers.cncf.kubernetes.backcompat.volume_mount import VolumeMount # noqa: autoflake
warnings.warn(
"This module is deprecated. Please use `kubernetes.client.models.V1VolumeMount`.",
RemovedInAirflow3Warning,
stacklevel=2,
)
| 1,340 | 38.441176 | 104 | py |
airflow | airflow-main/airflow/kubernetes/kube_config.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.configuration import conf
from airflow.exceptions import AirflowConfigException
from airflow.settings import AIRFLOW_HOME
class KubeConfig:
"""Configuration for Kubernetes."""
core_section = "core"
kubernetes_section = "kubernetes_executor"
logging_section = "logging"
def __init__(self):
configuration_dict = conf.as_dict(display_sensitive=True)
self.core_configuration = configuration_dict[self.core_section]
self.airflow_home = AIRFLOW_HOME
self.dags_folder = conf.get(self.core_section, "dags_folder")
self.parallelism = conf.getint(self.core_section, "parallelism")
self.pod_template_file = conf.get(self.kubernetes_section, "pod_template_file", fallback=None)
self.delete_worker_pods = conf.getboolean(self.kubernetes_section, "delete_worker_pods")
self.delete_worker_pods_on_failure = conf.getboolean(
self.kubernetes_section, "delete_worker_pods_on_failure"
)
self.worker_pods_creation_batch_size = conf.getint(
self.kubernetes_section, "worker_pods_creation_batch_size"
)
self.worker_container_repository = conf.get(self.kubernetes_section, "worker_container_repository")
self.worker_container_tag = conf.get(self.kubernetes_section, "worker_container_tag")
if self.worker_container_repository and self.worker_container_tag:
self.kube_image = f"{self.worker_container_repository}:{self.worker_container_tag}"
else:
self.kube_image = None
# The Kubernetes Namespace in which the Scheduler and Webserver reside. Note
# that if your
# cluster has RBAC enabled, your scheduler may need service account permissions to
# create, watch, get, and delete pods in this namespace.
self.kube_namespace = conf.get(self.kubernetes_section, "namespace")
self.multi_namespace_mode = conf.getboolean(self.kubernetes_section, "multi_namespace_mode")
if self.multi_namespace_mode and conf.get(
self.kubernetes_section, "multi_namespace_mode_namespace_list"
):
self.multi_namespace_mode_namespace_list = conf.get(
self.kubernetes_section, "multi_namespace_mode_namespace_list"
).split(",")
else:
self.multi_namespace_mode_namespace_list = None
# The Kubernetes Namespace in which pods will be created by the executor. Note
# that if your
# cluster has RBAC enabled, your workers may need service account permissions to
# interact with cluster components.
self.executor_namespace = conf.get(self.kubernetes_section, "namespace")
self.worker_pods_queued_check_interval = conf.getint(
self.kubernetes_section, "worker_pods_queued_check_interval"
)
self.kube_client_request_args = conf.getjson(
self.kubernetes_section, "kube_client_request_args", fallback={}
)
if not isinstance(self.kube_client_request_args, dict):
raise AirflowConfigException(
f"[{self.kubernetes_section}] 'kube_client_request_args' expected a JSON dict, got "
+ type(self.kube_client_request_args).__name__
)
if self.kube_client_request_args:
if "_request_timeout" in self.kube_client_request_args and isinstance(
self.kube_client_request_args["_request_timeout"], list
):
self.kube_client_request_args["_request_timeout"] = tuple(
self.kube_client_request_args["_request_timeout"]
)
self.delete_option_kwargs = conf.getjson(self.kubernetes_section, "delete_option_kwargs", fallback={})
if not isinstance(self.delete_option_kwargs, dict):
raise AirflowConfigException(
f"[{self.kubernetes_section}] 'delete_option_kwargs' expected a JSON dict, got "
+ type(self.delete_option_kwargs).__name__
)
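# --- Illustrative note (editor addition, not part of the original module) ---
# A hedged example of the `_request_timeout` normalization above, assuming a setting
# such as the following (example values only) in the [kubernetes_executor] section:
#
#   kube_client_request_args = {"_request_timeout": [60, 60]}
#
# JSON has no tuple type, so the list form [connect, read] is converted into the tuple
# (60, 60) that the kubernetes client expects for its request timeout argument.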
| 4,852 | 48.020202 | 110 | py |
airflow | airflow-main/airflow/kubernetes/pod_runtime_info_env.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use :mod:`kubernetes.client.models.V1EnvVar`."""
from __future__ import annotations
import warnings
from airflow.exceptions import RemovedInAirflow3Warning
with warnings.catch_warnings():
warnings.simplefilter("ignore", RemovedInAirflow3Warning)
from airflow.providers.cncf.kubernetes.backcompat.pod_runtime_info_env import PodRuntimeInfoEnv # noqa
warnings.warn(
"This module is deprecated. Please use `kubernetes.client.models.V1EnvVar`.",
RemovedInAirflow3Warning,
stacklevel=2,
)
| 1,333 | 38.235294 | 107 | py |
airflow | airflow-main/airflow/kubernetes/pod_generator_deprecated.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Backwards compatibility for Pod generation.
This module provides an interface between the previous Pod
API and outputs a kubernetes.client.models.V1Pod.
The advantage is that the full Kubernetes API
is supported and no serialization needs to be written.
"""
from __future__ import annotations
import copy
import uuid
import re2
from kubernetes.client import models as k8s
from airflow.utils.hashlib_wrapper import md5
MAX_POD_ID_LEN = 253
MAX_LABEL_LEN = 63
class PodDefaults:
"""Static defaults for Pods."""
XCOM_MOUNT_PATH = "/airflow/xcom"
SIDECAR_CONTAINER_NAME = "airflow-xcom-sidecar"
XCOM_CMD = 'trap "exit 0" INT; while true; do sleep 30; done;'
VOLUME_MOUNT = k8s.V1VolumeMount(name="xcom", mount_path=XCOM_MOUNT_PATH)
VOLUME = k8s.V1Volume(name="xcom", empty_dir=k8s.V1EmptyDirVolumeSource())
SIDECAR_CONTAINER = k8s.V1Container(
name=SIDECAR_CONTAINER_NAME,
command=["sh", "-c", XCOM_CMD],
image="alpine",
volume_mounts=[VOLUME_MOUNT],
resources=k8s.V1ResourceRequirements(
requests={
"cpu": "1m",
}
),
)
def make_safe_label_value(string):
"""
Normalize a provided label to be of valid length and characters.
Valid label values must be 63 characters or less and must be empty or begin and
end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_),
dots (.), and alphanumerics between.
If the label value is greater than 63 chars once made safe, or differs in any
way from the original value sent to this function, then we need to truncate to
53 chars, and append it with a unique hash.
"""
safe_label = re2.sub(r"^[^a-z0-9A-Z]*|[^a-zA-Z0-9_\-\.]|[^a-z0-9A-Z]*$", "", string)
if len(safe_label) > MAX_LABEL_LEN or string != safe_label:
safe_hash = md5(string.encode()).hexdigest()[:9]
safe_label = safe_label[: MAX_LABEL_LEN - len(safe_hash) - 1] + "-" + safe_hash
return safe_label
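# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# A minimal, hedged example of the truncate-and-hash behaviour described above. The
# input value is made up; only the shape of the output is asserted.
def _example_make_safe_label_value() -> None:
    label = make_safe_label_value("user@example.com/" * 10)
    assert len(label) <= MAX_LABEL_LEN
    assert label[-10] == "-"  # a dash followed by a 9-character md5 prefix is appended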
class PodGenerator:
"""
Contains Kubernetes Airflow Worker configuration logic.
Represents a kubernetes pod and manages execution of a single pod.
Any configuration that is container specific gets applied to
the first container in the list of containers.
:param image: The docker image
:param name: name in the metadata section (not the container name)
:param namespace: pod namespace
:param volume_mounts: list of kubernetes volumes mounts
:param envs: A dict containing the environment variables
:param cmds: The command to be run on the first container
:param args: The arguments to be run on the pod
:param labels: labels for the pod metadata
:param node_selectors: node selectors for the pod
:param ports: list of ports. Applies to the first container.
:param volumes: Volumes to be attached to the first container
:param image_pull_policy: Specify a policy to cache or always pull an image
:param restart_policy: The restart policy of the pod
:param image_pull_secrets: Any image pull secrets to be given to the pod.
If more than one secret is required, provide a comma separated list:
secret_a,secret_b
:param init_containers: A list of init containers
:param service_account_name: Identity for processes that run in a Pod
:param resources: Resource requirements for the first containers
:param annotations: annotations for the pod
:param affinity: A dict containing a group of affinity scheduling rules
:param hostnetwork: If True enable host networking on the pod
:param tolerations: A list of kubernetes tolerations
:param security_context: A dict containing the security context for the pod
:param configmaps: Any configmap refs to envfrom.
If more than one configmap is required, provide a comma separated list
configmap_a,configmap_b
:param dnspolicy: Specify a dnspolicy for the pod
:param schedulername: Specify a schedulername for the pod
:param pod: The fully specified pod. Mutually exclusive with `path_or_string`
:param extract_xcom: Whether to bring up a container for xcom
:param priority_class_name: priority class name for the launched Pod
"""
def __init__(
self,
image: str | None = None,
name: str | None = None,
namespace: str | None = None,
volume_mounts: list[k8s.V1VolumeMount | dict] | None = None,
envs: dict[str, str] | None = None,
cmds: list[str] | None = None,
args: list[str] | None = None,
labels: dict[str, str] | None = None,
node_selectors: dict[str, str] | None = None,
ports: list[k8s.V1ContainerPort | dict] | None = None,
volumes: list[k8s.V1Volume | dict] | None = None,
image_pull_policy: str | None = None,
restart_policy: str | None = None,
image_pull_secrets: str | None = None,
init_containers: list[k8s.V1Container] | None = None,
service_account_name: str | None = None,
resources: k8s.V1ResourceRequirements | dict | None = None,
annotations: dict[str, str] | None = None,
affinity: dict | None = None,
hostnetwork: bool = False,
tolerations: list | None = None,
security_context: k8s.V1PodSecurityContext | dict | None = None,
configmaps: list[str] | None = None,
dnspolicy: str | None = None,
schedulername: str | None = None,
extract_xcom: bool = False,
priority_class_name: str | None = None,
):
self.pod = k8s.V1Pod()
self.pod.api_version = "v1"
self.pod.kind = "Pod"
# Pod Metadata
self.metadata = k8s.V1ObjectMeta()
self.metadata.labels = labels
self.metadata.name = name
self.metadata.namespace = namespace
self.metadata.annotations = annotations
# Pod Container
self.container = k8s.V1Container(name="base")
self.container.image = image
self.container.env = []
if envs:
if isinstance(envs, dict):
for key, val in envs.items():
self.container.env.append(k8s.V1EnvVar(name=key, value=val))
elif isinstance(envs, list):
self.container.env.extend(envs)
configmaps = configmaps or []
self.container.env_from = []
for configmap in configmaps:
self.container.env_from.append(
k8s.V1EnvFromSource(config_map_ref=k8s.V1ConfigMapEnvSource(name=configmap))
)
self.container.command = cmds or []
self.container.args = args or []
if image_pull_policy:
self.container.image_pull_policy = image_pull_policy
self.container.ports = ports or []
self.container.resources = resources
self.container.volume_mounts = volume_mounts or []
# Pod Spec
self.spec = k8s.V1PodSpec(containers=[])
self.spec.security_context = security_context
self.spec.tolerations = tolerations
if dnspolicy:
self.spec.dns_policy = dnspolicy
self.spec.scheduler_name = schedulername
self.spec.host_network = hostnetwork
self.spec.affinity = affinity
self.spec.service_account_name = service_account_name
self.spec.init_containers = init_containers
self.spec.volumes = volumes or []
self.spec.node_selector = node_selectors
if restart_policy:
self.spec.restart_policy = restart_policy
self.spec.priority_class_name = priority_class_name
self.spec.image_pull_secrets = []
if image_pull_secrets:
for image_pull_secret in image_pull_secrets.split(","):
self.spec.image_pull_secrets.append(k8s.V1LocalObjectReference(name=image_pull_secret))
# Attach sidecar
self.extract_xcom = extract_xcom
def gen_pod(self) -> k8s.V1Pod:
"""Generates pod."""
result = None
if result is None:
result = self.pod
result.spec = self.spec
result.metadata = self.metadata
result.spec.containers = [self.container]
result.metadata.name = self.make_unique_pod_id(result.metadata.name)
if self.extract_xcom:
result = self.add_sidecar(result)
return result
@staticmethod
def add_sidecar(pod: k8s.V1Pod) -> k8s.V1Pod:
"""Adds sidecar."""
pod_cp = copy.deepcopy(pod)
pod_cp.spec.volumes = pod.spec.volumes or []
pod_cp.spec.volumes.insert(0, PodDefaults.VOLUME)
pod_cp.spec.containers[0].volume_mounts = pod_cp.spec.containers[0].volume_mounts or []
pod_cp.spec.containers[0].volume_mounts.insert(0, PodDefaults.VOLUME_MOUNT)
pod_cp.spec.containers.append(PodDefaults.SIDECAR_CONTAINER)
return pod_cp
@staticmethod
def from_obj(obj) -> k8s.V1Pod | None:
"""Converts to pod from obj."""
if obj is None:
return None
if isinstance(obj, PodGenerator):
return obj.gen_pod()
if not isinstance(obj, dict):
raise TypeError(
"Cannot convert a non-dictionary or non-PodGenerator "
"object into a KubernetesExecutorConfig"
)
# We do not want to extract constant here from ExecutorLoader because it is just
# A name in dictionary rather than executor selection mechanism and it causes cyclic import
namespaced = obj.get("KubernetesExecutor", {})
if not namespaced:
return None
resources = namespaced.get("resources")
if resources is None:
requests = {
"cpu": namespaced.get("request_cpu"),
"memory": namespaced.get("request_memory"),
"ephemeral-storage": namespaced.get("ephemeral-storage"),
}
limits = {
"cpu": namespaced.get("limit_cpu"),
"memory": namespaced.get("limit_memory"),
"ephemeral-storage": namespaced.get("ephemeral-storage"),
}
all_resources = list(requests.values()) + list(limits.values())
if all(r is None for r in all_resources):
resources = None
else:
resources = k8s.V1ResourceRequirements(requests=requests, limits=limits)
namespaced["resources"] = resources
return PodGenerator(**namespaced).gen_pod()
@staticmethod
def make_unique_pod_id(dag_id):
r"""
Generate a unique Pod name.
Kubernetes pod names must be <= 253 chars and must pass the following regex for
validation
``^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$``
:param dag_id: a dag_id with only alphanumeric characters
:return: ``str`` valid Pod name of appropriate length
"""
if not dag_id:
return None
safe_uuid = uuid.uuid4().hex
safe_pod_id = dag_id[: MAX_POD_ID_LEN - len(safe_uuid) - 1] + "-" + safe_uuid
return safe_pod_id
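# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# A minimal, hedged example of the deprecated PodGenerator with the xcom sidecar. The
# image and names are placeholder values; no cluster is needed, since only the V1Pod
# object is built here.
def _example_deprecated_pod_generator() -> None:
    pod = PodGenerator(image="alpine:3", name="demo", namespace="default", extract_xcom=True).gen_pod()
    assert pod.metadata.name.startswith("demo-")  # a uuid hex suffix keeps the name unique
    assert [c.name for c in pod.spec.containers] == ["base", PodDefaults.SIDECAR_CONTAINER_NAME]
    assert pod.spec.volumes[0].name == "xcom"  # shared emptyDir used to hand back the xcom value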
| 11,962 | 37.71521 | 103 | py |
airflow | airflow-main/airflow/kubernetes/kubernetes_helper_functions.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import logging
import secrets
import string
from typing import TYPE_CHECKING
import pendulum
from slugify import slugify
from airflow.compat.functools import cache
from airflow.configuration import conf
if TYPE_CHECKING:
from airflow.models.taskinstancekey import TaskInstanceKey
log = logging.getLogger(__name__)
alphanum_lower = string.ascii_lowercase + string.digits
def rand_str(num):
"""Generate random lowercase alphanumeric string of length num.
:meta private:
"""
return "".join(secrets.choice(alphanum_lower) for _ in range(num))
def add_pod_suffix(*, pod_name, rand_len=8, max_len=80):
"""Add random string to pod name while staying under max len."""
suffix = "-" + rand_str(rand_len)
return pod_name[: max_len - len(suffix)].strip("-.") + suffix
def create_pod_id(
dag_id: str | None = None,
task_id: str | None = None,
*,
max_length: int = 80,
unique: bool = True,
) -> str:
"""
Generates unique pod ID given a dag_id and / or task_id.
The default of 80 for max length is somewhat arbitrary, mainly a balance between
content and not overwhelming terminal windows of reasonable width. The true
upper limit is 253, and this is enforced in construct_pod.
:param dag_id: DAG ID
:param task_id: Task ID
:param max_length: max number of characters
:param unique: whether a random string suffix should be added
:return: A valid identifier for a kubernetes pod name
"""
if not (dag_id or task_id):
raise ValueError("Must supply either dag_id or task_id.")
name = ""
if dag_id:
name += dag_id
if task_id:
if name:
name += "-"
name += task_id
base_name = slugify(name, lowercase=True)[:max_length].strip(".-")
if unique:
return add_pod_suffix(pod_name=base_name, rand_len=8, max_len=max_length)
else:
return base_name
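# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# A minimal, hedged example of create_pod_id. The dag/task names are made up; only the
# length bound and the random 8-character suffix are asserted.
def _example_create_pod_id() -> None:
    name = create_pod_id(dag_id="example_dag", task_id="extract_task", max_length=40)
    assert len(name) <= 40
    assert len(name.rsplit("-", 1)[-1]) == 8  # suffix appended by add_pod_suffix
    assert create_pod_id(dag_id="example_dag", task_id="extract_task", unique=False) != name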
def annotations_to_key(annotations: dict[str, str]) -> TaskInstanceKey:
"""Build a TaskInstanceKey based on pod annotations."""
log.debug("Creating task key for annotations %s", annotations)
dag_id = annotations["dag_id"]
task_id = annotations["task_id"]
try_number = int(annotations["try_number"])
annotation_run_id = annotations.get("run_id")
map_index = int(annotations.get("map_index", -1))
# Compat: Look up the run_id from the TI table!
from airflow.models.dagrun import DagRun
from airflow.models.taskinstance import TaskInstance, TaskInstanceKey
from airflow.settings import Session
if not annotation_run_id and "execution_date" in annotations:
execution_date = pendulum.parse(annotations["execution_date"])
# Do _not_ use create-session, we don't want to expunge
session = Session()
task_instance_run_id = (
session.query(TaskInstance.run_id)
.join(TaskInstance.dag_run)
.filter(
TaskInstance.dag_id == dag_id,
TaskInstance.task_id == task_id,
DagRun.execution_date == execution_date,
)
.scalar()
)
else:
task_instance_run_id = annotation_run_id
return TaskInstanceKey(
dag_id=dag_id,
task_id=task_id,
run_id=task_instance_run_id,
try_number=try_number,
map_index=map_index,
)
@cache
def get_logs_task_metadata() -> bool:
return conf.getboolean("kubernetes_executor", "logs_task_metadata", fallback=False)
def annotations_for_logging_task_metadata(annotation_set):
if get_logs_task_metadata():
annotations_for_logging = annotation_set
else:
annotations_for_logging = "<omitted>"
return annotations_for_logging
| 4,556 | 31.55 | 87 | py |
airflow | airflow-main/airflow/kubernetes/volume.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use :mod:`kubernetes.client.models.V1Volume`."""
from __future__ import annotations
import warnings
from airflow.exceptions import RemovedInAirflow3Warning
with warnings.catch_warnings():
warnings.simplefilter("ignore", RemovedInAirflow3Warning)
from airflow.providers.cncf.kubernetes.backcompat.volume import Volume # noqa: autoflake
warnings.warn(
"This module is deprecated. Please use `kubernetes.client.models.V1Volume`.",
RemovedInAirflow3Warning,
stacklevel=2,
)
| 1,319 | 37.823529 | 93 | py |
airflow | airflow-main/airflow/kubernetes/pod.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module is deprecated.
Please use :mod:`kubernetes.client.models` for `V1ResourceRequirements` and `Port`.
"""
from __future__ import annotations
import warnings
from airflow.exceptions import RemovedInAirflow3Warning
# flake8: noqa
with warnings.catch_warnings():
warnings.simplefilter("ignore", RemovedInAirflow3Warning)
from airflow.providers.cncf.kubernetes.backcompat.pod import Port, Resources
warnings.warn(
"This module is deprecated. Please use `kubernetes.client.models` for `V1ResourceRequirements` and `Port`.",
RemovedInAirflow3Warning,
stacklevel=2,
)
| 1,386 | 34.564103 | 112 | py |
airflow | airflow-main/airflow/kubernetes/__init__.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/kubernetes/kube_client.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Client for kubernetes communication."""
from __future__ import annotations
import logging
import urllib3.util
from airflow.configuration import conf
log = logging.getLogger(__name__)
try:
from kubernetes import client, config
from kubernetes.client import Configuration
from kubernetes.client.rest import ApiException
has_kubernetes = True
def _get_default_configuration() -> Configuration:
if hasattr(Configuration, "get_default_copy"):
return Configuration.get_default_copy()
return Configuration()
def _disable_verify_ssl() -> None:
configuration = _get_default_configuration()
configuration.verify_ssl = False
Configuration.set_default(configuration)
except ImportError as e:
# We need an exception class to be able to use it in ``except`` elsewhere
# in the code base
ApiException = BaseException
has_kubernetes = False
_import_err = e
def _enable_tcp_keepalive() -> None:
"""
Enable the TCP keepalive mechanism.
This prevents urllib3 connections from hanging indefinitely when an idle connection
is timed out by services such as cloud load balancers or firewalls.
See https://github.com/apache/airflow/pull/11406 for detailed explanation.
Please ping @michalmisiewicz or @dimberman in the PR if you want to modify this function.
"""
import socket
from urllib3.connection import HTTPConnection, HTTPSConnection
tcp_keep_idle = conf.getint("kubernetes_executor", "tcp_keep_idle")
tcp_keep_intvl = conf.getint("kubernetes_executor", "tcp_keep_intvl")
tcp_keep_cnt = conf.getint("kubernetes_executor", "tcp_keep_cnt")
socket_options = [(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)]
if hasattr(socket, "TCP_KEEPIDLE"):
socket_options.append((socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, tcp_keep_idle))
else:
log.debug("Unable to set TCP_KEEPIDLE on this platform")
if hasattr(socket, "TCP_KEEPINTVL"):
socket_options.append((socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, tcp_keep_intvl))
else:
log.debug("Unable to set TCP_KEEPINTVL on this platform")
if hasattr(socket, "TCP_KEEPCNT"):
socket_options.append((socket.IPPROTO_TCP, socket.TCP_KEEPCNT, tcp_keep_cnt))
else:
log.debug("Unable to set TCP_KEEPCNT on this platform")
HTTPSConnection.default_socket_options = HTTPSConnection.default_socket_options + socket_options
HTTPConnection.default_socket_options = HTTPConnection.default_socket_options + socket_options
def get_kube_client(
in_cluster: bool = conf.getboolean("kubernetes_executor", "in_cluster"),
cluster_context: str | None = None,
config_file: str | None = None,
) -> client.CoreV1Api:
"""
Retrieves Kubernetes client.
:param in_cluster: whether we are in cluster
:param cluster_context: context of the cluster
:param config_file: configuration file
:return: kubernetes client
:rtype: client.CoreV1Api
"""
if not has_kubernetes:
raise _import_err
if conf.getboolean("kubernetes_executor", "enable_tcp_keepalive"):
_enable_tcp_keepalive()
configuration = _get_default_configuration()
api_client_retry_configuration = conf.getjson(
"kubernetes_executor", "api_client_retry_configuration", fallback={}
)
if not conf.getboolean("kubernetes_executor", "verify_ssl"):
_disable_verify_ssl()
if isinstance(api_client_retry_configuration, dict):
configuration.retries = urllib3.util.Retry(**api_client_retry_configuration)
else:
raise ValueError("api_client_retry_configuration should be a dictionary")
if in_cluster:
config.load_incluster_config(client_configuration=configuration)
else:
if cluster_context is None:
cluster_context = conf.get("kubernetes_executor", "cluster_context", fallback=None)
if config_file is None:
config_file = conf.get("kubernetes_executor", "config_file", fallback=None)
config.load_kube_config(
config_file=config_file, context=cluster_context, client_configuration=configuration
)
if not conf.getboolean("kubernetes_executor", "verify_ssl"):
configuration.verify_ssl = False
ssl_ca_cert = conf.get("kubernetes_executor", "ssl_ca_cert")
if ssl_ca_cert:
configuration.ssl_ca_cert = ssl_ca_cert
api_client = client.ApiClient(configuration=configuration)
return client.CoreV1Api(api_client)
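# --- Illustrative note (editor addition, not part of the original module) ---
# A hedged example of the retry handling above, assuming a setting such as
#
#   [kubernetes_executor]
#   api_client_retry_configuration = {"total": 3, "backoff_factor": 0.5}
#
# (example values only): the JSON dict is expanded into urllib3.util.Retry(total=3,
# backoff_factor=0.5) and attached to the client configuration before the ApiClient
# and CoreV1Api objects are built.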
| 5,310 | 35.627586 | 100 | py |
airflow | airflow-main/airflow/kubernetes/pod_launcher.py |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module is deprecated.
Please use :mod:`kubernetes.client.models` for V1ResourceRequirements and Port.
"""
from __future__ import annotations
from airflow.kubernetes.pod_launcher_deprecated import PodLauncher, PodStatus # noqa: autoflake
| 1,036 | 38.884615 | 96 | py |
airflow | airflow-main/airflow/kubernetes/pod_generator.py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Pod generator.
This module provides an interface between the previous Pod
API and outputs a kubernetes.client.models.V1Pod.
The advantage is that the full Kubernetes API
is supported and no serialization needs to be written.
"""
from __future__ import annotations
import copy
import datetime
import logging
import os
import warnings
from functools import reduce
import re2
from dateutil import parser
from kubernetes.client import models as k8s
from kubernetes.client.api_client import ApiClient
from airflow.exceptions import (
AirflowConfigException,
PodMutationHookException,
PodReconciliationError,
RemovedInAirflow3Warning,
)
from airflow.kubernetes.kubernetes_helper_functions import add_pod_suffix, rand_str
from airflow.kubernetes.pod_generator_deprecated import PodDefaults, PodGenerator as PodGeneratorDeprecated
from airflow.utils import yaml
from airflow.utils.hashlib_wrapper import md5
from airflow.version import version as airflow_version
log = logging.getLogger(__name__)
MAX_LABEL_LEN = 63
def make_safe_label_value(string: str) -> str:
"""
Normalize a provided label to be of valid length and characters.
Valid label values must be 63 characters or less and must be empty or begin and
end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_),
dots (.), and alphanumerics between.
If the label value is greater than 63 chars once made safe, or differs in any
way from the original value sent to this function, then we need to truncate to
53 chars, and append it with a unique hash.
"""
safe_label = re2.sub(r"^[^a-z0-9A-Z]*|[^a-zA-Z0-9_\-\.]|[^a-z0-9A-Z]*$", "", string)
if len(safe_label) > MAX_LABEL_LEN or string != safe_label:
safe_hash = md5(string.encode()).hexdigest()[:9]
safe_label = safe_label[: MAX_LABEL_LEN - len(safe_hash) - 1] + "-" + safe_hash
return safe_label
def datetime_to_label_safe_datestring(datetime_obj: datetime.datetime) -> str:
"""
Transform a datetime string to use as a label.
Kubernetes doesn't allow ":" in label values. Since the ISO datetime format uses ":"
but not "_", replace ":" with "_".
:param datetime_obj: datetime.datetime object
:return: ISO-like string representing the datetime
"""
return datetime_obj.isoformat().replace(":", "_").replace("+", "_plus_")
def label_safe_datestring_to_datetime(string: str) -> datetime.datetime:
"""
Transform a label back to a datetime object.
Kubernetes doesn't permit ":" in label values, so the stored label uses "_" in place
of ":"; this reverses that replacement.
:param string: str
:return: datetime.datetime object
"""
return parser.parse(string.replace("_plus_", "+").replace("_", ":"))
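# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# A minimal, hedged example showing that the two label helpers above are inverses of
# each other; the timestamp is an arbitrary example value.
def _example_label_safe_datestring_round_trip() -> None:
    when = datetime.datetime(2023, 5, 1, 12, 30, tzinfo=datetime.timezone.utc)
    as_label = datetime_to_label_safe_datestring(when)
    assert ":" not in as_label and "+" not in as_label  # neither is allowed in label values
    assert label_safe_datestring_to_datetime(as_label) == when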
class PodGenerator:
"""
Contains Kubernetes Airflow Worker configuration logic.
Represents a kubernetes pod and manages execution of a single pod.
Any configuration that is container specific gets applied to
the first container in the list of containers.
:param pod: The fully specified pod. Mutually exclusive with `pod_template_file`
:param pod_template_file: Path to YAML file. Mutually exclusive with `pod`
:param extract_xcom: Whether to bring up a container for xcom
"""
def __init__(
self,
pod: k8s.V1Pod | None = None,
pod_template_file: str | None = None,
extract_xcom: bool = True,
):
if not pod_template_file and not pod:
raise AirflowConfigException(
"Podgenerator requires either a `pod` or a `pod_template_file` argument"
)
if pod_template_file and pod:
raise AirflowConfigException("Cannot pass both `pod` and `pod_template_file` arguments")
if pod_template_file:
self.ud_pod = self.deserialize_model_file(pod_template_file)
else:
self.ud_pod = pod
# Attach sidecar
self.extract_xcom = extract_xcom
def gen_pod(self) -> k8s.V1Pod:
"""Generates pod."""
warnings.warn("This function is deprecated. ", RemovedInAirflow3Warning)
result = self.ud_pod
result.metadata.name = add_pod_suffix(pod_name=result.metadata.name)
if self.extract_xcom:
result = self.add_xcom_sidecar(result)
return result
@staticmethod
def add_xcom_sidecar(pod: k8s.V1Pod) -> k8s.V1Pod:
"""Adds sidecar."""
warnings.warn(
"This function is deprecated. "
"Please use airflow.providers.cncf.kubernetes.utils.xcom_sidecar.add_xcom_sidecar instead"
)
pod_cp = copy.deepcopy(pod)
pod_cp.spec.volumes = pod.spec.volumes or []
pod_cp.spec.volumes.insert(0, PodDefaults.VOLUME)
pod_cp.spec.containers[0].volume_mounts = pod_cp.spec.containers[0].volume_mounts or []
pod_cp.spec.containers[0].volume_mounts.insert(0, PodDefaults.VOLUME_MOUNT)
pod_cp.spec.containers.append(PodDefaults.SIDECAR_CONTAINER)
return pod_cp
@staticmethod
def from_obj(obj) -> dict | k8s.V1Pod | None:
"""Converts to pod from obj."""
if obj is None:
return None
k8s_legacy_object = obj.get("KubernetesExecutor", None)
k8s_object = obj.get("pod_override", None)
if k8s_legacy_object and k8s_object:
raise AirflowConfigException(
"Can not have both a legacy and new"
"executor_config object. Please delete the KubernetesExecutor"
"dict and only use the pod_override kubernetes.client.models.V1Pod"
"object."
)
if not k8s_object and not k8s_legacy_object:
return None
if isinstance(k8s_object, k8s.V1Pod):
return k8s_object
elif isinstance(k8s_legacy_object, dict):
warnings.warn(
"Using a dictionary for the executor_config is deprecated and will soon be removed."
'please use a `kubernetes.client.models.V1Pod` class with a "pod_override" key'
" instead. ",
category=RemovedInAirflow3Warning,
)
return PodGenerator.from_legacy_obj(obj)
else:
raise TypeError(
"Cannot convert a non-kubernetes.client.models.V1Pod object into a KubernetesExecutorConfig"
)
@staticmethod
def from_legacy_obj(obj) -> k8s.V1Pod | None:
"""Converts to pod from obj."""
if obj is None:
return None
# We do not want to extract constant here from ExecutorLoader because it is just
# A name in dictionary rather than executor selection mechanism and it causes cyclic import
namespaced = obj.get("KubernetesExecutor", {})
if not namespaced:
return None
resources = namespaced.get("resources")
if resources is None:
requests = {
"cpu": namespaced.pop("request_cpu", None),
"memory": namespaced.pop("request_memory", None),
"ephemeral-storage": namespaced.get("ephemeral-storage"), # We pop this one in limits
}
limits = {
"cpu": namespaced.pop("limit_cpu", None),
"memory": namespaced.pop("limit_memory", None),
"ephemeral-storage": namespaced.pop("ephemeral-storage", None),
}
all_resources = list(requests.values()) + list(limits.values())
if all(r is None for r in all_resources):
resources = None
else:
# remove None's so they don't become 0's
requests = {k: v for k, v in requests.items() if v is not None}
limits = {k: v for k, v in limits.items() if v is not None}
resources = k8s.V1ResourceRequirements(requests=requests, limits=limits)
namespaced["resources"] = resources
return PodGeneratorDeprecated(**namespaced).gen_pod()
@staticmethod
def reconcile_pods(base_pod: k8s.V1Pod, client_pod: k8s.V1Pod | None) -> k8s.V1Pod:
"""
Merge Kubernetes Pod objects.
:param base_pod: has the base attributes which are overwritten if they exist
in the client pod and remain if they do not exist in the client_pod
:param client_pod: the pod that the client wants to create.
:return: the merged pods
This can't be done recursively as certain fields are overwritten and some are concatenated.
"""
if client_pod is None:
return base_pod
client_pod_cp = copy.deepcopy(client_pod)
client_pod_cp.spec = PodGenerator.reconcile_specs(base_pod.spec, client_pod_cp.spec)
client_pod_cp.metadata = PodGenerator.reconcile_metadata(base_pod.metadata, client_pod_cp.metadata)
client_pod_cp = merge_objects(base_pod, client_pod_cp)
return client_pod_cp
@staticmethod
def reconcile_metadata(base_meta, client_meta):
"""
Merge Kubernetes Metadata objects.
:param base_meta: has the base attributes which are overwritten if they exist
in the client_meta and remain if they do not exist in the client_meta
:param client_meta: the spec that the client wants to create.
:return: the merged specs
"""
if base_meta and not client_meta:
return base_meta
if not base_meta and client_meta:
return client_meta
elif client_meta and base_meta:
client_meta.labels = merge_objects(base_meta.labels, client_meta.labels)
client_meta.annotations = merge_objects(base_meta.annotations, client_meta.annotations)
extend_object_field(base_meta, client_meta, "managed_fields")
extend_object_field(base_meta, client_meta, "finalizers")
extend_object_field(base_meta, client_meta, "owner_references")
return merge_objects(base_meta, client_meta)
return None
@staticmethod
def reconcile_specs(
base_spec: k8s.V1PodSpec | None, client_spec: k8s.V1PodSpec | None
) -> k8s.V1PodSpec | None:
"""
Merge Kubernetes PodSpec objects.
:param base_spec: has the base attributes which are overwritten if they exist
in the client_spec and remain if they do not exist in the client_spec
:param client_spec: the spec that the client wants to create.
:return: the merged specs
"""
if base_spec and not client_spec:
return base_spec
if not base_spec and client_spec:
return client_spec
elif client_spec and base_spec:
client_spec.containers = PodGenerator.reconcile_containers(
base_spec.containers, client_spec.containers
)
merged_spec = extend_object_field(base_spec, client_spec, "init_containers")
merged_spec = extend_object_field(base_spec, merged_spec, "volumes")
return merge_objects(base_spec, merged_spec)
return None
@staticmethod
def reconcile_containers(
base_containers: list[k8s.V1Container], client_containers: list[k8s.V1Container]
) -> list[k8s.V1Container]:
"""
Merge Kubernetes Container objects.
:param base_containers: has the base attributes which are overwritten if they exist
in the client_containers and remain if they do not exist in the client_containers
:param client_containers: the containers that the client wants to create.
:return: the merged containers
This runs recursively over the list of containers.
"""
if not base_containers:
return client_containers
if not client_containers:
return base_containers
client_container = client_containers[0]
base_container = base_containers[0]
client_container = extend_object_field(base_container, client_container, "volume_mounts")
client_container = extend_object_field(base_container, client_container, "env")
client_container = extend_object_field(base_container, client_container, "env_from")
client_container = extend_object_field(base_container, client_container, "ports")
client_container = extend_object_field(base_container, client_container, "volume_devices")
client_container = merge_objects(base_container, client_container)
return [client_container] + PodGenerator.reconcile_containers(
base_containers[1:], client_containers[1:]
)
@classmethod
def construct_pod(
cls,
dag_id: str,
task_id: str,
pod_id: str,
try_number: int,
kube_image: str,
date: datetime.datetime | None,
args: list[str],
pod_override_object: k8s.V1Pod | None,
base_worker_pod: k8s.V1Pod,
namespace: str,
scheduler_job_id: str,
run_id: str | None = None,
map_index: int = -1,
*,
with_mutation_hook: bool = False,
) -> k8s.V1Pod:
"""
Create a Pod.
Construct a pod by gathering and consolidating the configuration from 3 places:
- airflow.cfg
- executor_config
- dynamic arguments
"""
if len(pod_id) > 253:
warnings.warn(
"pod_id supplied is longer than 253 characters; truncating and adding unique suffix."
)
pod_id = add_pod_suffix(pod_name=pod_id, max_len=253)
try:
image = pod_override_object.spec.containers[0].image # type: ignore
if not image:
image = kube_image
except Exception:
image = kube_image
annotations = {
"dag_id": dag_id,
"task_id": task_id,
"try_number": str(try_number),
}
if map_index >= 0:
annotations["map_index"] = str(map_index)
if date:
annotations["execution_date"] = date.isoformat()
if run_id:
annotations["run_id"] = run_id
dynamic_pod = k8s.V1Pod(
metadata=k8s.V1ObjectMeta(
namespace=namespace,
annotations=annotations,
name=pod_id,
labels=cls.build_labels_for_k8s_executor_pod(
dag_id=dag_id,
task_id=task_id,
try_number=try_number,
airflow_worker=scheduler_job_id,
map_index=map_index,
execution_date=date,
run_id=run_id,
),
),
spec=k8s.V1PodSpec(
containers=[
k8s.V1Container(
name="base",
args=args,
image=image,
env=[k8s.V1EnvVar(name="AIRFLOW_IS_K8S_EXECUTOR_POD", value="True")],
)
]
),
)
# Reconcile the pods starting with the first chronologically,
# Pod from the pod_template_File -> Pod from executor_config arg -> Pod from the K8s executor
pod_list = [base_worker_pod, pod_override_object, dynamic_pod]
try:
pod = reduce(PodGenerator.reconcile_pods, pod_list)
except Exception as e:
raise PodReconciliationError from e
if with_mutation_hook:
from airflow.settings import pod_mutation_hook
try:
pod_mutation_hook(pod)
except Exception as e:
raise PodMutationHookException from e
return pod
@classmethod
def build_selector_for_k8s_executor_pod(
cls,
*,
dag_id,
task_id,
try_number,
map_index=None,
execution_date=None,
run_id=None,
airflow_worker=None,
):
"""
Generate selector for kubernetes executor pod.
:meta private:
"""
labels = cls.build_labels_for_k8s_executor_pod(
dag_id=dag_id,
task_id=task_id,
try_number=try_number,
map_index=map_index,
execution_date=execution_date,
run_id=run_id,
airflow_worker=airflow_worker,
)
label_strings = [f"{label_id}={label}" for label_id, label in sorted(labels.items())]
selector = ",".join(label_strings)
if not airflow_worker: # this filters out KPO pods even when we don't know the scheduler job id
selector += ",airflow-worker"
return selector
@classmethod
def build_labels_for_k8s_executor_pod(
cls,
*,
dag_id,
task_id,
try_number,
airflow_worker=None,
map_index=None,
execution_date=None,
run_id=None,
):
"""
Generate labels for kubernetes executor pod.
:meta private:
"""
labels = {
"dag_id": make_safe_label_value(dag_id),
"task_id": make_safe_label_value(task_id),
"try_number": str(try_number),
"kubernetes_executor": "True",
"airflow_version": airflow_version.replace("+", "-"),
}
if airflow_worker is not None:
labels["airflow-worker"] = make_safe_label_value(str(airflow_worker))
if map_index is not None and map_index >= 0:
labels["map_index"] = str(map_index)
if execution_date:
labels["execution_date"] = datetime_to_label_safe_datestring(execution_date)
if run_id:
labels["run_id"] = make_safe_label_value(run_id)
return labels
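# Illustrative sketch (hypothetical values): the executor later finds its pods by these labels.
#
#   >>> labels = PodGenerator.build_labels_for_k8s_executor_pod(
#   ...     dag_id="example_dag", task_id="example_task", try_number=1,
#   ...     airflow_worker=42, run_id="manual_run_1",
#   ... )
#   >>> labels["dag_id"], labels["airflow-worker"], labels["kubernetes_executor"]
#   ('example_dag', '42', 'True')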
@staticmethod
def serialize_pod(pod: k8s.V1Pod) -> dict:
"""
Convert a k8s.V1Pod into a json serializable dictionary.
:param pod: k8s.V1Pod object
:return: Serialized version of the pod returned as dict
"""
api_client = ApiClient()
return api_client.sanitize_for_serialization(pod)
@staticmethod
def deserialize_model_file(path: str) -> k8s.V1Pod:
"""
Generate a Pod from a file.
:param path: Path to the file
:return: a kubernetes.client.models.V1Pod
"""
if os.path.exists(path):
with open(path) as stream:
pod = yaml.safe_load(stream)
else:
pod = None
log.warning("Model file %s does not exist", path)
return PodGenerator.deserialize_model_dict(pod)
@staticmethod
def deserialize_model_dict(pod_dict: dict | None) -> k8s.V1Pod:
"""
Deserializes a Python dictionary to k8s.V1Pod.
Unfortunately we need access to the private method
``_ApiClient__deserialize_model`` from the kubernetes client.
This issue is tracked here: https://github.com/kubernetes-client/python/issues/977.
:param pod_dict: Serialized dict of k8s.V1Pod object
:return: De-serialized k8s.V1Pod
"""
api_client = ApiClient()
return api_client._ApiClient__deserialize_model(pod_dict, k8s.V1Pod)
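# Illustrative sketch (hypothetical values): serialize_pod and deserialize_model_dict are
# inverses; the DAG serialization code uses this pair to round-trip V1Pod objects
# (see the DAT.POD branch in serialized_objects.py).
#
#   >>> pod = k8s.V1Pod(metadata=k8s.V1ObjectMeta(name="example-pod"))
#   >>> as_dict = PodGenerator.serialize_pod(pod)   # {'metadata': {'name': 'example-pod'}}
#   >>> PodGenerator.deserialize_model_dict(as_dict).metadata.name
#   'example-pod'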
@staticmethod
def make_unique_pod_id(pod_id: str) -> str | None:
r"""
Generate a unique Pod name.
Kubernetes pod names must consist of one or more lowercase
rfc1035/rfc1123 labels separated by '.' with a maximum length of 253
characters.
Name must pass the following regex for validation
``^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$``
For more details, see:
https://github.com/kubernetes/kubernetes/blob/release-1.1/docs/design/identifiers.md
:param pod_id: requested pod name
:return: ``str`` valid Pod name of appropriate length
"""
warnings.warn(
"This function is deprecated. Use `add_pod_suffix` in `kubernetes_helper_functions`.",
RemovedInAirflow3Warning,
)
if not pod_id:
return None
max_pod_id_len = 100 # arbitrarily chosen
suffix = rand_str(8) # 8 seems good enough
base_pod_id_len = max_pod_id_len - len(suffix) - 1 # -1 for separator
trimmed_pod_id = pod_id[:base_pod_id_len].rstrip("-.")
return f"{trimmed_pod_id}-{suffix}"
def merge_objects(base_obj, client_obj):
"""
Merge objects.
:param base_obj: has the base attributes which are overwritten if they exist
in the client_obj and remain if they do not exist in the client_obj
:param client_obj: the object that the client wants to create.
:return: the merged objects
"""
if not base_obj:
return client_obj
if not client_obj:
return base_obj
client_obj_cp = copy.deepcopy(client_obj)
if isinstance(base_obj, dict) and isinstance(client_obj_cp, dict):
base_obj_cp = copy.deepcopy(base_obj)
base_obj_cp.update(client_obj_cp)
return base_obj_cp
for base_key in base_obj.to_dict().keys():
base_val = getattr(base_obj, base_key, None)
if not getattr(client_obj, base_key, None) and base_val:
if not isinstance(client_obj_cp, dict):
setattr(client_obj_cp, base_key, base_val)
else:
client_obj_cp[base_key] = base_val
return client_obj_cp
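# Illustrative sketch (hypothetical values): merge_objects works on plain dicts as well as on
# kubernetes model objects; client values win and base values fill the gaps.
#
#   >>> merge_objects({"a": 1, "b": 2}, {"b": 3})
#   {'a': 1, 'b': 3}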
def extend_object_field(base_obj, client_obj, field_name):
"""
Add field values to existing objects.
:param base_obj: an object which has a property `field_name` that is a list
:param client_obj: an object which has a property `field_name` that is a list.
A copy of this object is returned with `field_name` modified
:param field_name: the name of the list field
:return: a copy of client_obj whose `field_name` property contains the values from both objects appended
"""
client_obj_cp = copy.deepcopy(client_obj)
base_obj_field = getattr(base_obj, field_name, None)
client_obj_field = getattr(client_obj, field_name, None)
if (not isinstance(base_obj_field, list) and base_obj_field is not None) or (
not isinstance(client_obj_field, list) and client_obj_field is not None
):
raise ValueError("The chosen field must be a list.")
if not base_obj_field:
return client_obj_cp
if not client_obj_field:
setattr(client_obj_cp, field_name, base_obj_field)
return client_obj_cp
appended_fields = base_obj_field + client_obj_field
setattr(client_obj_cp, field_name, appended_fields)
return client_obj_cp
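# Illustrative sketch (hypothetical values): list-valued fields are concatenated, base first.
#
#   >>> base = k8s.V1Container(name="base", env=[k8s.V1EnvVar(name="A", value="1")])
#   >>> client = k8s.V1Container(name="base", env=[k8s.V1EnvVar(name="B", value="2")])
#   >>> [e.name for e in extend_object_field(base, client, "env").env]
#   ['A', 'B']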
| 23,458 | 35.654688 | 108 |
py
|
airflow
|
airflow-main/airflow/serialization/serialized_objects.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Serialized DAG and BaseOperator."""
from __future__ import annotations
import collections.abc
import datetime
import enum
import inspect
import logging
import warnings
import weakref
from dataclasses import dataclass
from inspect import Parameter, signature
from typing import TYPE_CHECKING, Any, Collection, Iterable, Mapping, NamedTuple, Union
import cattr
import lazy_object_proxy
import pendulum
from dateutil import relativedelta
from pendulum.tz.timezone import FixedTimezone, Timezone
from airflow.compat.functools import cache
from airflow.configuration import conf
from airflow.datasets import Dataset
from airflow.exceptions import AirflowException, RemovedInAirflow3Warning, SerializationError
from airflow.jobs.job import Job
from airflow.models.baseoperator import BaseOperator, BaseOperatorLink
from airflow.models.connection import Connection
from airflow.models.dag import DAG, create_timetable
from airflow.models.dagrun import DagRun
from airflow.models.expandinput import EXPAND_INPUT_EMPTY, ExpandInput, create_expand_input, get_map_type_key
from airflow.models.mappedoperator import MappedOperator
from airflow.models.operator import Operator
from airflow.models.param import Param, ParamsDict
from airflow.models.taskinstance import SimpleTaskInstance, TaskInstance
from airflow.models.taskmixin import DAGNode
from airflow.models.xcom_arg import XComArg, deserialize_xcom_arg, serialize_xcom_arg
from airflow.providers_manager import ProvidersManager
from airflow.serialization.enums import DagAttributeTypes as DAT, Encoding
from airflow.serialization.helpers import serialize_template_field
from airflow.serialization.json_schema import Validator, load_dag_schema
from airflow.serialization.pydantic.dag_run import DagRunPydantic
from airflow.serialization.pydantic.dataset import DatasetPydantic
from airflow.serialization.pydantic.job import JobPydantic
from airflow.serialization.pydantic.taskinstance import TaskInstancePydantic
from airflow.settings import _ENABLE_AIP_44, DAGS_FOLDER, json
from airflow.timetables.base import Timetable
from airflow.utils.code_utils import get_python_source
from airflow.utils.docs import get_docs_url
from airflow.utils.module_loading import import_string, qualname
from airflow.utils.operator_resources import Resources
from airflow.utils.task_group import MappedTaskGroup, TaskGroup
if TYPE_CHECKING:
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
HAS_KUBERNETES: bool
try:
from kubernetes.client import models as k8s
from airflow.kubernetes.pod_generator import PodGenerator
except ImportError:
pass
log = logging.getLogger(__name__)
_OPERATOR_EXTRA_LINKS: set[str] = {
"airflow.operators.trigger_dagrun.TriggerDagRunLink",
"airflow.sensors.external_task.ExternalDagLink",
# Deprecated names, so that existing serialized dags load straight away.
"airflow.sensors.external_task.ExternalTaskSensorLink",
"airflow.operators.dagrun_operator.TriggerDagRunLink",
"airflow.sensors.external_task_sensor.ExternalTaskSensorLink",
}
@cache
def get_operator_extra_links() -> set[str]:
"""Get the operator extra links.
This includes both the built-in ones and those that come from the providers.
"""
_OPERATOR_EXTRA_LINKS.update(ProvidersManager().extra_links_class_names)
return _OPERATOR_EXTRA_LINKS
@cache
def _get_default_mapped_partial() -> dict[str, Any]:
"""Get default partial kwargs in a mapped operator.
This is used to simplify a serialized mapped operator by excluding default
values supplied in the implementation from the serialized dict. Since those
are defaults, they are automatically supplied on de-serialization, so we
don't need to store them.
"""
# Use the private _expand() method to avoid the empty kwargs check.
default = BaseOperator.partial(task_id="_")._expand(EXPAND_INPUT_EMPTY, strict=False).partial_kwargs
return BaseSerialization.serialize(default)[Encoding.VAR]
def encode_relativedelta(var: relativedelta.relativedelta) -> dict[str, Any]:
"""Encode a relativedelta object."""
encoded = {k: v for k, v in var.__dict__.items() if not k.startswith("_") and v}
if var.weekday and var.weekday.n:
# Every n'th Friday for example
encoded["weekday"] = [var.weekday.weekday, var.weekday.n]
elif var.weekday:
encoded["weekday"] = [var.weekday.weekday]
return encoded
def decode_relativedelta(var: dict[str, Any]) -> relativedelta.relativedelta:
"""Dencode a relativedelta object."""
if "weekday" in var:
var["weekday"] = relativedelta.weekday(*var["weekday"]) # type: ignore
return relativedelta.relativedelta(**var)
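# Illustrative sketch (hypothetical values): only truthy fields are stored, and the weekday is
# expanded into a list so it survives the JSON round trip.
#
#   >>> encode_relativedelta(relativedelta.relativedelta(months=1, day=3))
#   {'months': 1, 'day': 3}
#   >>> decode_relativedelta({'months': 1, 'day': 3}) == relativedelta.relativedelta(months=1, day=3)
#   True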
def encode_timezone(var: Timezone) -> str | int:
"""Encode a Pendulum Timezone for serialization.
Airflow only supports timezone objects that implements Pendulum's Timezone
interface. We try to keep as much information as possible to make conversion
round-tripping possible (see ``decode_timezone``). We need to special-case
UTC; Pendulum implements it as a FixedTimezone (i.e. it gets encoded as
0 without the special case), but passing 0 into ``pendulum.timezone`` does
not give us UTC (but ``+00:00``).
"""
if isinstance(var, FixedTimezone):
if var.offset == 0:
return "UTC"
return var.offset
if isinstance(var, Timezone):
return var.name
raise ValueError(
f"DAG timezone should be a pendulum.tz.Timezone, not {var!r}. "
f"See {get_docs_url('timezone.html#time-zone-aware-dags')}"
)
def decode_timezone(var: str | int) -> Timezone:
"""Decode a previously serialized Pendulum Timezone."""
return pendulum.tz.timezone(var)
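# Illustrative sketch (hypothetical values): named zones round-trip by name, fixed offsets by
# their offset in seconds, and UTC is special-cased so it does not come back as "+00:00".
#
#   >>> encode_timezone(pendulum.timezone("Europe/Amsterdam"))
#   'Europe/Amsterdam'
#   >>> decode_timezone("Europe/Amsterdam").name
#   'Europe/Amsterdam'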
def _get_registered_timetable(importable_string: str) -> type[Timetable] | None:
from airflow import plugins_manager
if importable_string.startswith("airflow.timetables."):
return import_string(importable_string)
plugins_manager.initialize_timetables_plugins()
if plugins_manager.timetable_classes:
return plugins_manager.timetable_classes.get(importable_string)
else:
return None
class _TimetableNotRegistered(ValueError):
def __init__(self, type_string: str) -> None:
self.type_string = type_string
def __str__(self) -> str:
return (
f"Timetable class {self.type_string!r} is not registered or "
"you have a top level database access that disrupted the session. "
"Please check the airflow best practices documentation."
)
def _encode_timetable(var: Timetable) -> dict[str, Any]:
"""Encode a timetable instance.
This delegates most of the serialization work to the type, so the behavior
can be completely controlled by a custom subclass.
"""
timetable_class = type(var)
importable_string = qualname(timetable_class)
if _get_registered_timetable(importable_string) is None:
raise _TimetableNotRegistered(importable_string)
return {Encoding.TYPE: importable_string, Encoding.VAR: var.serialize()}
def _decode_timetable(var: dict[str, Any]) -> Timetable:
"""Decode a previously serialized timetable.
Most of the deserialization logic is delegated to the actual type, which
we import from string.
"""
importable_string = var[Encoding.TYPE]
timetable_class = _get_registered_timetable(importable_string)
if timetable_class is None:
raise _TimetableNotRegistered(importable_string)
return timetable_class.deserialize(var[Encoding.VAR])
class _XComRef(NamedTuple):
"""Used to store info needed to create XComArg.
We can't turn it into an XComArg until we've loaded _all_ the tasks, so when
deserializing an operator, we need to create something in its place, and
post-process it in ``deserialize_dag``.
"""
data: dict
def deref(self, dag: DAG) -> XComArg:
return deserialize_xcom_arg(self.data, dag)
# These two should be kept in sync. Note that these are intentionally not using
# the type declarations in expandinput.py so we always remember to update
# serialization logic when adding new ExpandInput variants. If you add things to
# the unions, be sure to update _ExpandInputRef to match.
_ExpandInputOriginalValue = Union[
# For .expand(**kwargs).
Mapping[str, Any],
# For expand_kwargs(arg).
XComArg,
Collection[Union[XComArg, Mapping[str, Any]]],
]
_ExpandInputSerializedValue = Union[
# For .expand(**kwargs).
Mapping[str, Any],
# For expand_kwargs(arg).
_XComRef,
Collection[Union[_XComRef, Mapping[str, Any]]],
]
class _ExpandInputRef(NamedTuple):
"""Used to store info needed to create a mapped operator's expand input.
This references a ``ExpandInput`` type, but replaces ``XComArg`` objects
with ``_XComRef`` (see documentation on the latter type for reasoning).
"""
key: str
value: _ExpandInputSerializedValue
@classmethod
def validate_expand_input_value(cls, value: _ExpandInputOriginalValue) -> None:
"""Validate we've covered all ``ExpandInput.value`` types.
This function does not actually do anything, but is called during
serialization so Mypy will *statically* check we have handled all
possible ExpandInput cases.
"""
def deref(self, dag: DAG) -> ExpandInput:
"""De-reference into a concrete ExpandInput object.
If you add more cases here, be sure to update _ExpandInputOriginalValue
and _ExpandInputSerializedValue to match the logic.
"""
if isinstance(self.value, _XComRef):
value: Any = self.value.deref(dag)
elif isinstance(self.value, collections.abc.Mapping):
value = {k: v.deref(dag) if isinstance(v, _XComRef) else v for k, v in self.value.items()}
else:
value = [v.deref(dag) if isinstance(v, _XComRef) else v for v in self.value]
return create_expand_input(self.key, value)
class BaseSerialization:
"""BaseSerialization provides utils for serialization."""
# JSON primitive types.
_primitive_types = (int, bool, float, str)
# Time types.
# datetime.date and datetime.time are converted to strings.
_datetime_types = (datetime.datetime,)
# Object types that are always excluded in serialization.
_excluded_types = (logging.Logger, Connection, type, property)
_json_schema: Validator | None = None
# Should the extra operator link be loaded via plugins when
# de-serializing the DAG? This flag is set to False in Scheduler so that Extra Operator links
# are not loaded to not run User code in Scheduler.
_load_operator_extra_links = True
_CONSTRUCTOR_PARAMS: dict[str, Parameter] = {}
SERIALIZER_VERSION = 1
@classmethod
def to_json(cls, var: DAG | BaseOperator | dict | list | set | tuple) -> str:
"""Stringifies DAGs and operators contained by var and returns a JSON string of var."""
return json.dumps(cls.to_dict(var), ensure_ascii=True)
@classmethod
def to_dict(cls, var: DAG | BaseOperator | dict | list | set | tuple) -> dict:
"""Stringifies DAGs and operators contained by var and returns a dict of var."""
# Don't call on this class directly - only SerializedDAG or
# SerializedBaseOperator should be used as the "entrypoint"
raise NotImplementedError()
@classmethod
def from_json(cls, serialized_obj: str) -> BaseSerialization | dict | list | set | tuple:
"""Deserializes json_str and reconstructs all DAGs and operators it contains."""
return cls.from_dict(json.loads(serialized_obj))
@classmethod
def from_dict(cls, serialized_obj: dict[Encoding, Any]) -> BaseSerialization | dict | list | set | tuple:
"""Deserialize a dict of type decorators and reconstructs all DAGs and operators it contains."""
return cls.deserialize(serialized_obj)
@classmethod
def validate_schema(cls, serialized_obj: str | dict) -> None:
"""Validate serialized_obj satisfies JSON schema."""
if cls._json_schema is None:
raise AirflowException(f"JSON schema of {cls.__name__:s} is not set.")
if isinstance(serialized_obj, dict):
cls._json_schema.validate(serialized_obj)
elif isinstance(serialized_obj, str):
cls._json_schema.validate(json.loads(serialized_obj))
else:
raise TypeError("Invalid type: Only dict and str are supported.")
@staticmethod
def _encode(x: Any, type_: Any) -> dict[Encoding, Any]:
"""Encode data by a JSON dict."""
return {Encoding.VAR: x, Encoding.TYPE: type_}
@classmethod
def _is_primitive(cls, var: Any) -> bool:
"""Primitive types."""
return var is None or isinstance(var, cls._primitive_types)
@classmethod
def _is_excluded(cls, var: Any, attrname: str, instance: Any) -> bool:
"""Types excluded from serialization."""
if var is None:
if not cls._is_constructor_param(attrname, instance):
# Any instance attribute, that is not a constructor argument, we exclude None as the default
return True
return cls._value_is_hardcoded_default(attrname, var, instance)
return isinstance(var, cls._excluded_types) or cls._value_is_hardcoded_default(
attrname, var, instance
)
@classmethod
def serialize_to_json(
cls, object_to_serialize: BaseOperator | MappedOperator | DAG, decorated_fields: set
) -> dict[str, Any]:
"""Serializes an object to JSON."""
serialized_object: dict[str, Any] = {}
keys_to_serialize = object_to_serialize.get_serialized_fields()
for key in keys_to_serialize:
# None is ignored in serialized form and is added back in deserialization.
value = getattr(object_to_serialize, key, None)
if cls._is_excluded(value, key, object_to_serialize):
continue
if key == "_operator_name":
# when operator_name matches task_type, we can remove
# it to reduce the JSON payload
task_type = getattr(object_to_serialize, "_task_type", None)
if value != task_type:
serialized_object[key] = cls.serialize(value)
elif key in decorated_fields:
serialized_object[key] = cls.serialize(value)
elif key == "timetable" and value is not None:
serialized_object[key] = _encode_timetable(value)
else:
value = cls.serialize(value)
if isinstance(value, dict) and Encoding.TYPE in value:
value = value[Encoding.VAR]
serialized_object[key] = value
return serialized_object
@classmethod
def serialize(
cls, var: Any, *, strict: bool = False, use_pydantic_models: bool = False
) -> Any: # Unfortunately there is no support for recursive types in mypy
"""Helper function of depth first search for serialization.
The serialization protocol is:
(1) keeping JSON supported types: primitives, dict, list;
(2) encoding other types as ``{TYPE: 'foo', VAR: 'bar'}``, the deserialization
step decodes VAR according to TYPE;
(3) Operator has a special field CLASS to record the original class
name for displaying in UI.
:meta private:
"""
if use_pydantic_models and not _ENABLE_AIP_44:
raise RuntimeError(
"Setting use_pydantic_models = True requires AIP-44 (in progress) feature flag to be true. "
"This parameter will be removed eventually when new serialization is used by AIP-44"
)
if cls._is_primitive(var):
# enum.IntEnum is an int instance, it causes json dumps error so we use its value.
if isinstance(var, enum.Enum):
return var.value
return var
elif isinstance(var, dict):
return cls._encode(
{
str(k): cls.serialize(v, strict=strict, use_pydantic_models=use_pydantic_models)
for k, v in var.items()
},
type_=DAT.DICT,
)
elif isinstance(var, list):
return [cls.serialize(v, strict=strict, use_pydantic_models=use_pydantic_models) for v in var]
elif var.__class__.__name__ == "V1Pod" and _has_kubernetes() and isinstance(var, k8s.V1Pod):
json_pod = PodGenerator.serialize_pod(var)
return cls._encode(json_pod, type_=DAT.POD)
elif isinstance(var, DAG):
return SerializedDAG.serialize_dag(var)
elif isinstance(var, Resources):
return var.to_dict()
elif isinstance(var, MappedOperator):
return SerializedBaseOperator.serialize_mapped_operator(var)
elif isinstance(var, BaseOperator):
return SerializedBaseOperator.serialize_operator(var)
elif isinstance(var, cls._datetime_types):
return cls._encode(var.timestamp(), type_=DAT.DATETIME)
elif isinstance(var, datetime.timedelta):
return cls._encode(var.total_seconds(), type_=DAT.TIMEDELTA)
elif isinstance(var, Timezone):
return cls._encode(encode_timezone(var), type_=DAT.TIMEZONE)
elif isinstance(var, relativedelta.relativedelta):
return cls._encode(encode_relativedelta(var), type_=DAT.RELATIVEDELTA)
elif callable(var):
return str(get_python_source(var))
elif isinstance(var, set):
# FIXME: casts set to list in customized serialization in future.
try:
return cls._encode(
sorted(
cls.serialize(v, strict=strict, use_pydantic_models=use_pydantic_models) for v in var
),
type_=DAT.SET,
)
except TypeError:
return cls._encode(
[cls.serialize(v, strict=strict, use_pydantic_models=use_pydantic_models) for v in var],
type_=DAT.SET,
)
elif isinstance(var, tuple):
# FIXME: casts tuple to list in customized serialization in future.
return cls._encode(
[cls.serialize(v, strict=strict, use_pydantic_models=use_pydantic_models) for v in var],
type_=DAT.TUPLE,
)
elif isinstance(var, TaskGroup):
return TaskGroupSerialization.serialize_task_group(var)
elif isinstance(var, Param):
return cls._encode(cls._serialize_param(var), type_=DAT.PARAM)
elif isinstance(var, XComArg):
return cls._encode(serialize_xcom_arg(var), type_=DAT.XCOM_REF)
elif isinstance(var, Dataset):
return cls._encode(dict(uri=var.uri, extra=var.extra), type_=DAT.DATASET)
elif isinstance(var, SimpleTaskInstance):
return cls._encode(
cls.serialize(var.__dict__, strict=strict, use_pydantic_models=use_pydantic_models),
type_=DAT.SIMPLE_TASK_INSTANCE,
)
elif use_pydantic_models and _ENABLE_AIP_44:
if isinstance(var, Job):
return cls._encode(JobPydantic.from_orm(var).dict(), type_=DAT.BASE_JOB)
elif isinstance(var, TaskInstance):
return cls._encode(TaskInstancePydantic.from_orm(var).dict(), type_=DAT.TASK_INSTANCE)
elif isinstance(var, DagRun):
return cls._encode(DagRunPydantic.from_orm(var).dict(), type_=DAT.DAG_RUN)
elif isinstance(var, Dataset):
return cls._encode(DatasetPydantic.from_orm(var).dict(), type_=DAT.DATA_SET)
else:
return cls.default_serialization(strict, var)
else:
return cls.default_serialization(strict, var)
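# Illustrative sketch (hypothetical value): anything that is not a JSON primitive is wrapped in
# the {TYPE, VAR} envelope described above, so a timedelta becomes roughly
#
#   >>> BaseSerialization.serialize(datetime.timedelta(minutes=5))
#   {'__var': 300.0, '__type': 'timedelta'}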
@classmethod
def default_serialization(cls, strict, var) -> str:
log.debug("Cast type %s to str in serialization.", type(var))
if strict:
raise SerializationError("Encountered unexpected type")
return str(var)
@classmethod
def deserialize(cls, encoded_var: Any, use_pydantic_models=False) -> Any:
"""Helper function of depth first search for deserialization.
:meta private:
"""
# JSON primitives (except for dict) are not encoded.
if use_pydantic_models and not _ENABLE_AIP_44:
raise RuntimeError(
"Setting use_pydantic_models = True requires AIP-44 (in progress) feature flag to be true. "
"This parameter will be removed eventually when new serialization is used by AIP-44"
)
if cls._is_primitive(encoded_var):
return encoded_var
elif isinstance(encoded_var, list):
return [cls.deserialize(v, use_pydantic_models) for v in encoded_var]
if not isinstance(encoded_var, dict):
raise ValueError(f"The encoded_var should be dict and is {type(encoded_var)}")
var = encoded_var[Encoding.VAR]
type_ = encoded_var[Encoding.TYPE]
if type_ == DAT.DICT:
return {k: cls.deserialize(v, use_pydantic_models) for k, v in var.items()}
elif type_ == DAT.DAG:
return SerializedDAG.deserialize_dag(var)
elif type_ == DAT.OP:
return SerializedBaseOperator.deserialize_operator(var)
elif type_ == DAT.DATETIME:
return pendulum.from_timestamp(var)
elif type_ == DAT.POD:
if not _has_kubernetes():
raise RuntimeError("Cannot deserialize POD objects without kubernetes libraries installed!")
pod = PodGenerator.deserialize_model_dict(var)
return pod
elif type_ == DAT.TIMEDELTA:
return datetime.timedelta(seconds=var)
elif type_ == DAT.TIMEZONE:
return decode_timezone(var)
elif type_ == DAT.RELATIVEDELTA:
return decode_relativedelta(var)
elif type_ == DAT.SET:
return {cls.deserialize(v, use_pydantic_models) for v in var}
elif type_ == DAT.TUPLE:
return tuple(cls.deserialize(v, use_pydantic_models) for v in var)
elif type_ == DAT.PARAM:
return cls._deserialize_param(var)
elif type_ == DAT.XCOM_REF:
return _XComRef(var) # Delay deserializing XComArg objects until we have the entire DAG.
elif type_ == DAT.DATASET:
return Dataset(**var)
elif type_ == DAT.SIMPLE_TASK_INSTANCE:
return SimpleTaskInstance(**cls.deserialize(var))
elif use_pydantic_models and _ENABLE_AIP_44:
if type_ == DAT.BASE_JOB:
return JobPydantic.parse_obj(var)
elif type_ == DAT.TASK_INSTANCE:
return TaskInstancePydantic.parse_obj(var)
elif type_ == DAT.DAG_RUN:
return DagRunPydantic.parse_obj(var)
elif type_ == DAT.DATA_SET:
return DatasetPydantic.parse_obj(var)
else:
raise TypeError(f"Invalid type {type_!s} in deserialization.")
_deserialize_datetime = pendulum.from_timestamp
_deserialize_timezone = pendulum.tz.timezone
@classmethod
def _deserialize_timedelta(cls, seconds: int) -> datetime.timedelta:
return datetime.timedelta(seconds=seconds)
@classmethod
def _is_constructor_param(cls, attrname: str, instance: Any) -> bool:
return attrname in cls._CONSTRUCTOR_PARAMS
@classmethod
def _value_is_hardcoded_default(cls, attrname: str, value: Any, instance: Any) -> bool:
"""
Return true if ``value`` is the hard-coded default for the given attribute.
This takes into account cases where the ``max_active_tasks`` parameter is
stored in the ``_max_active_tasks`` attribute.
And by using `is` here only and not `==` this copes with the case a
user explicitly specifies an attribute with the same "value" as the
default. (This is because ``"default" is "default"`` will be False as
they are different strings with the same characters.)
Also returns True if the value is an empty list or empty dict. This is done
to account for the case where the default value of the field is None but has the
``field = field or {}`` set.
"""
if attrname in cls._CONSTRUCTOR_PARAMS and (
cls._CONSTRUCTOR_PARAMS[attrname] is value or (value in [{}, []])
):
return True
return False
@classmethod
def _serialize_param(cls, param: Param):
return dict(
__class=f"{param.__module__}.{param.__class__.__name__}",
default=cls.serialize(param.value),
description=cls.serialize(param.description),
schema=cls.serialize(param.schema),
)
@classmethod
def _deserialize_param(cls, param_dict: dict):
"""
Workaround to serialize Param on older versions.
In 2.2.0, Param attrs were assumed to be json-serializable and were not run through
this class's ``serialize`` method. So before running through ``deserialize``,
we first verify that it's necessary to do.
"""
class_name = param_dict["__class"]
class_: type[Param] = import_string(class_name)
attrs = ("default", "description", "schema")
kwargs = {}
def is_serialized(val):
if isinstance(val, dict):
return Encoding.TYPE in val
if isinstance(val, list):
return all(isinstance(item, dict) and Encoding.TYPE in item for item in val)
return False
for attr in attrs:
if attr not in param_dict:
continue
val = param_dict[attr]
if is_serialized(val):
deserialized_val = cls.deserialize(param_dict[attr])
kwargs[attr] = deserialized_val
else:
kwargs[attr] = val
return class_(**kwargs)
@classmethod
def _serialize_params_dict(cls, params: ParamsDict | dict):
"""Serialize Params dict for a DAG or task."""
serialized_params = {}
for k, v in params.items():
# TODO: As of now, we would allow serialization of params which are of type Param only.
try:
class_identity = f"{v.__module__}.{v.__class__.__name__}"
except AttributeError:
class_identity = ""
if class_identity == "airflow.models.param.Param":
serialized_params[k] = cls._serialize_param(v)
else:
raise ValueError(
f"Params to a DAG or a Task can be only of type airflow.models.param.Param, "
f"but param {k!r} is {v.__class__}"
)
return serialized_params
@classmethod
def _deserialize_params_dict(cls, encoded_params: dict) -> ParamsDict:
"""Deserialize a DAG's Params dict."""
op_params = {}
for k, v in encoded_params.items():
if isinstance(v, dict) and "__class" in v:
op_params[k] = cls._deserialize_param(v)
else:
# Old style params, convert it
op_params[k] = Param(v)
return ParamsDict(op_params)
class DependencyDetector:
"""
Detects dependencies between DAGs.
:meta private:
"""
@staticmethod
def detect_task_dependencies(task: Operator) -> list[DagDependency]:
"""Detects dependencies caused by tasks."""
from airflow.operators.trigger_dagrun import TriggerDagRunOperator
from airflow.sensors.external_task import ExternalTaskSensor
deps = []
if isinstance(task, TriggerDagRunOperator):
deps.append(
DagDependency(
source=task.dag_id,
target=getattr(task, "trigger_dag_id"),
dependency_type="trigger",
dependency_id=task.task_id,
)
)
elif isinstance(task, ExternalTaskSensor):
deps.append(
DagDependency(
source=getattr(task, "external_dag_id"),
target=task.dag_id,
dependency_type="sensor",
dependency_id=task.task_id,
)
)
for obj in task.outlets or []:
if isinstance(obj, Dataset):
deps.append(
DagDependency(
source=task.dag_id,
target="dataset",
dependency_type="dataset",
dependency_id=obj.uri,
)
)
return deps
@staticmethod
def detect_dag_dependencies(dag: DAG | None) -> Iterable[DagDependency]:
"""Detects dependencies set directly on the DAG object."""
if not dag:
return
for x in dag.dataset_triggers:
yield DagDependency(
source="dataset",
target=dag.dag_id,
dependency_type="dataset",
dependency_id=x.uri,
)
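# Illustrative sketch (hypothetical DAG and task ids): a TriggerDagRunOperator in DAG "parent"
# that triggers DAG "child" yields a single dependency of type "trigger".
#
#   >>> from airflow.operators.trigger_dagrun import TriggerDagRunOperator
#   >>> with DAG(dag_id="parent", start_date=pendulum.datetime(2023, 1, 1)) as dag:
#   ...     task = TriggerDagRunOperator(task_id="kick_off", trigger_dag_id="child")
#   >>> DependencyDetector.detect_task_dependencies(task)
#   [DagDependency(source='parent', target='child', dependency_type='trigger', dependency_id='kick_off')]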
class SerializedBaseOperator(BaseOperator, BaseSerialization):
"""A JSON serializable representation of operator.
All operators are cast to SerializedBaseOperator after deserialization.
Class-specific attributes used by the UI are moved to object attributes.
"""
_decorated_fields = {"executor_config"}
_CONSTRUCTOR_PARAMS = {
k: v.default
for k, v in signature(BaseOperator.__init__).parameters.items()
if v.default is not v.empty
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# task_type is used by UI to display the correct class type, because UI only
# receives BaseOperator from deserialized DAGs.
self._task_type = "BaseOperator"
# Move class attributes into object attributes.
self.ui_color = BaseOperator.ui_color
self.ui_fgcolor = BaseOperator.ui_fgcolor
self.template_ext = BaseOperator.template_ext
self.template_fields = BaseOperator.template_fields
self.operator_extra_links = BaseOperator.operator_extra_links
@property
def task_type(self) -> str:
# Overwrites task_type of BaseOperator to use _task_type instead of
# __class__.__name__.
return self._task_type
@task_type.setter
def task_type(self, task_type: str):
self._task_type = task_type
@property
def operator_name(self) -> str:
# Overwrites operator_name of BaseOperator to use _operator_name instead of
# __class__.operator_name.
return self._operator_name
@operator_name.setter
def operator_name(self, operator_name: str):
self._operator_name = operator_name
@classmethod
def serialize_mapped_operator(cls, op: MappedOperator) -> dict[str, Any]:
serialized_op = cls._serialize_node(op, include_deps=op.deps != MappedOperator.deps_for(BaseOperator))
# Handle expand_input and op_kwargs_expand_input.
expansion_kwargs = op._get_specified_expand_input()
if TYPE_CHECKING: # Let Mypy check the input type for us!
_ExpandInputRef.validate_expand_input_value(expansion_kwargs.value)
serialized_op[op._expand_input_attr] = {
"type": get_map_type_key(expansion_kwargs),
"value": cls.serialize(expansion_kwargs.value),
}
# Simplify partial_kwargs by comparing it to the most barebone object.
# Remove all entries that are simply default values.
serialized_partial = serialized_op["partial_kwargs"]
for k, default in _get_default_mapped_partial().items():
try:
v = serialized_partial[k]
except KeyError:
continue
if v == default:
del serialized_partial[k]
serialized_op["_is_mapped"] = True
return serialized_op
@classmethod
def serialize_operator(cls, op: BaseOperator) -> dict[str, Any]:
return cls._serialize_node(op, include_deps=op.deps is not BaseOperator.deps)
@classmethod
def _serialize_node(cls, op: BaseOperator | MappedOperator, include_deps: bool) -> dict[str, Any]:
"""Serializes operator into a JSON object."""
serialize_op = cls.serialize_to_json(op, cls._decorated_fields)
serialize_op["_task_type"] = getattr(op, "_task_type", type(op).__name__)
serialize_op["_task_module"] = getattr(op, "_task_module", type(op).__module__)
if op.operator_name != serialize_op["_task_type"]:
serialize_op["_operator_name"] = op.operator_name
# Used to determine if an Operator is inherited from EmptyOperator
serialize_op["_is_empty"] = op.inherits_from_empty_operator
if op.operator_extra_links:
serialize_op["_operator_extra_links"] = cls._serialize_operator_extra_links(
op.operator_extra_links.__get__(op)
if isinstance(op.operator_extra_links, property)
else op.operator_extra_links
)
if include_deps:
serialize_op["deps"] = cls._serialize_deps(op.deps)
# Store all template_fields as they are if there are JSON Serializable
# If not, store them as strings
# And raise an exception if the field is not templateable
forbidden_fields = set(inspect.signature(BaseOperator.__init__).parameters.keys())
if op.template_fields:
for template_field in op.template_fields:
if template_field in forbidden_fields:
raise AirflowException(f"Cannot template BaseOperator fields: {template_field}")
value = getattr(op, template_field, None)
if not cls._is_excluded(value, template_field, op):
serialize_op[template_field] = serialize_template_field(value)
if op.params:
serialize_op["params"] = cls._serialize_params_dict(op.params)
return serialize_op
@classmethod
def _serialize_deps(cls, op_deps: Iterable[BaseTIDep]) -> list[str]:
from airflow import plugins_manager
plugins_manager.initialize_ti_deps_plugins()
if plugins_manager.registered_ti_dep_classes is None:
raise AirflowException("Can not load plugins")
deps = []
for dep in op_deps:
klass = type(dep)
module_name = klass.__module__
qualname = f"{module_name}.{klass.__name__}"
if (
not qualname.startswith("airflow.ti_deps.deps.")
and qualname not in plugins_manager.registered_ti_dep_classes
):
raise SerializationError(
f"Custom dep class {qualname} not serialized, please register it through plugins."
)
deps.append(qualname)
# deps needs to be sorted here, because op_deps is a set whose iteration order is unstable,
# so the same call may otherwise get different results.
# Without sorting, json.dumps(self.data, sort_keys=True) could generate a different dag_hash
# for an unchanged DAG.
return sorted(deps)
@classmethod
def populate_operator(cls, op: Operator, encoded_op: dict[str, Any]) -> None:
if "label" not in encoded_op:
# Handle deserialization of old data before the introduction of TaskGroup
encoded_op["label"] = encoded_op["task_id"]
# Extra Operator Links defined in Plugins
op_extra_links_from_plugin = {}
if "_operator_name" not in encoded_op:
encoded_op["_operator_name"] = encoded_op["_task_type"]
# We don't want to load Extra Operator links in Scheduler
if cls._load_operator_extra_links:
from airflow import plugins_manager
plugins_manager.initialize_extra_operators_links_plugins()
if plugins_manager.operator_extra_links is None:
raise AirflowException("Can not load plugins")
for ope in plugins_manager.operator_extra_links:
for operator in ope.operators:
if (
operator.__name__ == encoded_op["_task_type"]
and operator.__module__ == encoded_op["_task_module"]
):
op_extra_links_from_plugin.update({ope.name: ope})
# If OperatorLinks are defined in Plugins but not in the Operator that is being Serialized
# set the Operator links attribute
# The case for "If OperatorLinks are defined in the operator that is being Serialized"
# is handled in the deserialization loop where it matches k == "_operator_extra_links"
if op_extra_links_from_plugin and "_operator_extra_links" not in encoded_op:
setattr(op, "operator_extra_links", list(op_extra_links_from_plugin.values()))
for k, v in encoded_op.items():
# TODO: Remove in Airflow 3.0 when the dummy operator is removed
if k == "_is_dummy":
k = "_is_empty"
if k in ("_outlets", "_inlets"):
# `_outlets` -> `outlets`
k = k[1:]
if k == "_downstream_task_ids":
# Upgrade from old format/name
k = "downstream_task_ids"
if k == "label":
# Label shouldn't be set anymore -- it's computed from task_id now
continue
elif k == "downstream_task_ids":
v = set(v)
elif k == "subdag":
v = SerializedDAG.deserialize_dag(v)
elif k in {"retry_delay", "execution_timeout", "sla", "max_retry_delay"}:
v = cls._deserialize_timedelta(v)
elif k in encoded_op["template_fields"]:
pass
elif k == "resources":
v = Resources.from_dict(v)
elif k.endswith("_date"):
v = cls._deserialize_datetime(v)
elif k == "_operator_extra_links":
if cls._load_operator_extra_links:
op_predefined_extra_links = cls._deserialize_operator_extra_links(v)
# If OperatorLinks with the same name exists, Links via Plugin have higher precedence
op_predefined_extra_links.update(op_extra_links_from_plugin)
else:
op_predefined_extra_links = {}
v = list(op_predefined_extra_links.values())
k = "operator_extra_links"
elif k == "deps":
v = cls._deserialize_deps(v)
elif k == "params":
v = cls._deserialize_params_dict(v)
if op.params: # Merge existing params if needed.
v, new = op.params, v
v.update(new)
elif k == "partial_kwargs":
v = {arg: cls.deserialize(value) for arg, value in v.items()}
elif k in {"expand_input", "op_kwargs_expand_input"}:
v = _ExpandInputRef(v["type"], cls.deserialize(v["value"]))
elif k in cls._decorated_fields or k not in op.get_serialized_fields():
v = cls.deserialize(v)
elif k in ("outlets", "inlets"):
v = cls.deserialize(v)
elif k == "on_failure_fail_dagrun":
k = "_on_failure_fail_dagrun"
# else use v as it is
setattr(op, k, v)
for k in op.get_serialized_fields() - encoded_op.keys() - cls._CONSTRUCTOR_PARAMS.keys():
# TODO: refactor deserialization of BaseOperator and MappedOperator (split it out); then this
# check could go away.
if not hasattr(op, k):
setattr(op, k, None)
# Set all the template_field to None that were not present in Serialized JSON
for field in op.template_fields:
if not hasattr(op, field):
setattr(op, field, None)
# Used to determine if an Operator is inherited from EmptyOperator
setattr(op, "_is_empty", bool(encoded_op.get("_is_empty", False)))
@classmethod
def deserialize_operator(cls, encoded_op: dict[str, Any]) -> Operator:
"""Deserializes an operator from a JSON object."""
op: Operator
if encoded_op.get("_is_mapped", False):
# Most of these will be loaded later, these are just some stand-ins.
op_data = {k: v for k, v in encoded_op.items() if k in BaseOperator.get_serialized_fields()}
try:
operator_name = encoded_op["_operator_name"]
except KeyError:
operator_name = encoded_op["_task_type"]
op = MappedOperator(
operator_class=op_data,
expand_input=EXPAND_INPUT_EMPTY,
partial_kwargs={},
task_id=encoded_op["task_id"],
params={},
deps=MappedOperator.deps_for(BaseOperator),
operator_extra_links=BaseOperator.operator_extra_links,
template_ext=BaseOperator.template_ext,
template_fields=BaseOperator.template_fields,
template_fields_renderers=BaseOperator.template_fields_renderers,
ui_color=BaseOperator.ui_color,
ui_fgcolor=BaseOperator.ui_fgcolor,
is_empty=False,
task_module=encoded_op["_task_module"],
task_type=encoded_op["_task_type"],
operator_name=operator_name,
dag=None,
task_group=None,
start_date=None,
end_date=None,
disallow_kwargs_override=encoded_op["_disallow_kwargs_override"],
expand_input_attr=encoded_op["_expand_input_attr"],
)
else:
op = SerializedBaseOperator(task_id=encoded_op["task_id"])
cls.populate_operator(op, encoded_op)
return op
@classmethod
def detect_dependencies(cls, op: Operator) -> set[DagDependency]:
"""Detects between DAG dependencies for the operator."""
def get_custom_dep() -> list[DagDependency]:
"""
If custom dependency detector is configured, use it.
TODO: Remove this logic in 3.0.
"""
custom_dependency_detector_cls = conf.getimport("scheduler", "dependency_detector", fallback=None)
if not (
custom_dependency_detector_cls is None or custom_dependency_detector_cls is DependencyDetector
):
warnings.warn(
"Use of a custom dependency detector is deprecated. "
"Support will be removed in a future release.",
RemovedInAirflow3Warning,
)
dep = custom_dependency_detector_cls().detect_task_dependencies(op)
if type(dep) is DagDependency:
return [dep]
return []
dependency_detector = DependencyDetector()
deps = set(dependency_detector.detect_task_dependencies(op))
deps.update(get_custom_dep()) # todo: remove in 3.0
return deps
@classmethod
def _is_excluded(cls, var: Any, attrname: str, op: DAGNode):
if var is not None and op.has_dag() and attrname.endswith("_date"):
# If this date is the same as the matching field in the dag, then
# don't store it again at the task level.
dag_date = getattr(op.dag, attrname, None)
if var is dag_date or var == dag_date:
return True
return super()._is_excluded(var, attrname, op)
@classmethod
def _deserialize_deps(cls, deps: list[str]) -> set[BaseTIDep]:
from airflow import plugins_manager
plugins_manager.initialize_ti_deps_plugins()
if plugins_manager.registered_ti_dep_classes is None:
raise AirflowException("Can not load plugins")
instances = set()
for qn in set(deps):
if (
not qn.startswith("airflow.ti_deps.deps.")
and qn not in plugins_manager.registered_ti_dep_classes
):
raise SerializationError(
f"Custom dep class {qn} not deserialized, please register it through plugins."
)
try:
instances.add(import_string(qn)())
except ImportError:
log.warning("Error importing dep %r", qn, exc_info=True)
return instances
@classmethod
def _deserialize_operator_extra_links(cls, encoded_op_links: list) -> dict[str, BaseOperatorLink]:
"""
Deserialize Operator Links if the classes are registered in Airflow Plugins.
If a link class is neither built-in nor registered through plugins, an error is logged and no
links are returned.
:param encoded_op_links: Serialized Operator Link
:return: De-Serialized Operator Link
"""
from airflow import plugins_manager
plugins_manager.initialize_extra_operators_links_plugins()
if plugins_manager.registered_operator_link_classes is None:
raise AirflowException("Can't load plugins")
op_predefined_extra_links = {}
for _operator_links_source in encoded_op_links:
# Get the key, value pair as Tuple where key is OperatorLink ClassName
# and value is the dictionary containing the arguments passed to the OperatorLink
#
# Example of a single iteration:
#
# _operator_links_source =
# {
# 'airflow.providers.google.cloud.operators.bigquery.BigQueryConsoleIndexableLink': {
# 'index': 0
# }
# },
#
# list(_operator_links_source.items()) =
# [
# (
# 'airflow.providers.google.cloud.operators.bigquery.BigQueryConsoleIndexableLink',
# {'index': 0}
# )
# ]
#
# list(_operator_links_source.items())[0] =
# (
# 'airflow.providers.google.cloud.operators.bigquery.BigQueryConsoleIndexableLink',
# {
# 'index': 0
# }
# )
_operator_link_class_path, data = list(_operator_links_source.items())[0]
if _operator_link_class_path in get_operator_extra_links():
single_op_link_class = import_string(_operator_link_class_path)
elif _operator_link_class_path in plugins_manager.registered_operator_link_classes:
single_op_link_class = plugins_manager.registered_operator_link_classes[
_operator_link_class_path
]
else:
log.error("Operator Link class %r not registered", _operator_link_class_path)
return {}
op_predefined_extra_link: BaseOperatorLink = cattr.structure(data, single_op_link_class)
op_predefined_extra_links.update({op_predefined_extra_link.name: op_predefined_extra_link})
return op_predefined_extra_links
@classmethod
def _serialize_operator_extra_links(cls, operator_extra_links: Iterable[BaseOperatorLink]):
"""
Serialize Operator Links.
Store the import path of the OperatorLink and the arguments passed to it.
For example:
``[{'airflow.providers.google.cloud.operators.bigquery.BigQueryConsoleLink': {}}]``
:param operator_extra_links: Operator Link
:return: Serialized Operator Link
"""
serialize_operator_extra_links = []
for operator_extra_link in operator_extra_links:
op_link_arguments = cattr.unstructure(operator_extra_link)
if not isinstance(op_link_arguments, dict):
op_link_arguments = {}
module_path = (
f"{operator_extra_link.__class__.__module__}.{operator_extra_link.__class__.__name__}"
)
serialize_operator_extra_links.append({module_path: op_link_arguments})
return serialize_operator_extra_links
@classmethod
def serialize(cls, var: Any, *, strict: bool = False, use_pydantic_models: bool = False) -> Any:
# The wonders of multiple inheritance: BaseOperator defines an instance method of the same name,
# so delegate explicitly to the BaseSerialization classmethod.
return BaseSerialization.serialize(var=var, strict=strict, use_pydantic_models=use_pydantic_models)
@classmethod
def deserialize(cls, encoded_var: Any, use_pydantic_models: bool = False) -> Any:
return BaseSerialization.deserialize(encoded_var=encoded_var, use_pydantic_models=use_pydantic_models)
class SerializedDAG(DAG, BaseSerialization):
"""
A JSON serializable representation of DAG.
A stringified DAG can only be used in the scope of the scheduler and webserver, because fields
that are not serializable, such as functions and custom-defined classes, are cast to
strings.
Compared with SimpleDAG: SerializedDAG contains all information for webserver.
Compared with DagPickle: DagPickle contains all information for worker, but some DAGs are
not pickle-able. SerializedDAG works for all DAGs.
"""
_decorated_fields = {"schedule_interval", "default_args", "_access_control"}
@staticmethod
def __get_constructor_defaults():
param_to_attr = {
"max_active_tasks": "_max_active_tasks",
"description": "_description",
"default_view": "_default_view",
"access_control": "_access_control",
}
return {
param_to_attr.get(k, k): v.default
for k, v in signature(DAG.__init__).parameters.items()
if v.default is not v.empty
}
_CONSTRUCTOR_PARAMS = __get_constructor_defaults.__func__() # type: ignore
del __get_constructor_defaults
_json_schema = lazy_object_proxy.Proxy(load_dag_schema)
@classmethod
def serialize_dag(cls, dag: DAG) -> dict:
"""Serializes a DAG into a JSON object."""
try:
serialized_dag = cls.serialize_to_json(dag, cls._decorated_fields)
serialized_dag["_processor_dags_folder"] = DAGS_FOLDER
# If schedule_interval is backed by timetable, serialize only
# timetable; vice versa for a timetable backed by schedule_interval.
if dag.timetable.summary == dag.schedule_interval:
del serialized_dag["schedule_interval"]
else:
del serialized_dag["timetable"]
serialized_dag["tasks"] = [cls.serialize(task) for _, task in dag.task_dict.items()]
dag_deps = {
dep
for task in dag.task_dict.values()
for dep in SerializedBaseOperator.detect_dependencies(task)
}
dag_deps.update(DependencyDetector.detect_dag_dependencies(dag))
serialized_dag["dag_dependencies"] = [x.__dict__ for x in sorted(dag_deps)]
serialized_dag["_task_group"] = TaskGroupSerialization.serialize_task_group(dag.task_group)
# Edge info in the JSON exactly matches our internal structure
serialized_dag["edge_info"] = dag.edge_info
serialized_dag["params"] = cls._serialize_params_dict(dag.params)
# has_on_*_callback are only stored if the value is True, as the default is False
if dag.has_on_success_callback:
serialized_dag["has_on_success_callback"] = True
if dag.has_on_failure_callback:
serialized_dag["has_on_failure_callback"] = True
return serialized_dag
except SerializationError:
raise
except Exception as e:
raise SerializationError(f"Failed to serialize DAG {dag.dag_id!r}: {e}")
@classmethod
def deserialize_dag(cls, encoded_dag: dict[str, Any]) -> SerializedDAG:
"""Deserializes a DAG from a JSON object."""
dag = SerializedDAG(dag_id=encoded_dag["_dag_id"])
for k, v in encoded_dag.items():
if k == "_downstream_task_ids":
v = set(v)
elif k == "tasks":
SerializedBaseOperator._load_operator_extra_links = cls._load_operator_extra_links
v = {task["task_id"]: SerializedBaseOperator.deserialize_operator(task) for task in v}
k = "task_dict"
elif k == "timezone":
v = cls._deserialize_timezone(v)
elif k == "dagrun_timeout":
v = cls._deserialize_timedelta(v)
elif k.endswith("_date"):
v = cls._deserialize_datetime(v)
elif k == "edge_info":
# Value structure matches exactly
pass
elif k == "timetable":
v = _decode_timetable(v)
elif k in cls._decorated_fields:
v = cls.deserialize(v)
elif k == "params":
v = cls._deserialize_params_dict(v)
elif k == "dataset_triggers":
v = cls.deserialize(v)
# else use v as it is
setattr(dag, k, v)
# A DAG is always serialized with only one of schedule_interval and
# timetable. This back-populates the other to ensure the two attributes
# line up correctly on the DAG instance.
if "timetable" in encoded_dag:
dag.schedule_interval = dag.timetable.summary
else:
dag.timetable = create_timetable(dag.schedule_interval, dag.timezone)
# Set _task_group
if "_task_group" in encoded_dag:
dag._task_group = TaskGroupSerialization.deserialize_task_group(
encoded_dag["_task_group"],
None,
dag.task_dict,
dag,
)
else:
# This must be old data that had no task_group. Create a root TaskGroup and add
# all tasks to it.
dag._task_group = TaskGroup.create_root(dag)
for task in dag.tasks:
dag.task_group.add(task)
# Set has_on_*_callbacks to True if they exist in Serialized blob as False is the default
if "has_on_success_callback" in encoded_dag:
dag.has_on_success_callback = True
if "has_on_failure_callback" in encoded_dag:
dag.has_on_failure_callback = True
keys_to_set_none = dag.get_serialized_fields() - encoded_dag.keys() - cls._CONSTRUCTOR_PARAMS.keys()
for k in keys_to_set_none:
setattr(dag, k, None)
for task in dag.task_dict.values():
task.dag = dag
for date_attr in ["start_date", "end_date"]:
if getattr(task, date_attr) is None:
setattr(task, date_attr, getattr(dag, date_attr))
if task.subdag is not None:
setattr(task.subdag, "parent_dag", dag)
# Dereference expand_input and op_kwargs_expand_input.
for k in ("expand_input", "op_kwargs_expand_input"):
kwargs_ref = getattr(task, k, None)
if isinstance(kwargs_ref, _ExpandInputRef):
setattr(task, k, kwargs_ref.deref(dag))
for task_id in task.downstream_task_ids:
# Bypass set_upstream etc here - it does more than we want
dag.task_dict[task_id].upstream_task_ids.add(task.task_id)
return dag
@classmethod
def to_dict(cls, var: Any) -> dict:
"""Stringifies DAGs and operators contained by var and returns a dict of var."""
json_dict = {"__version": cls.SERIALIZER_VERSION, "dag": cls.serialize_dag(var)}
# Validate the serialized DAG against the JSON schema; raises an error on mismatch.
cls.validate_schema(json_dict)
return json_dict
@classmethod
def from_dict(cls, serialized_obj: dict) -> SerializedDAG:
"""Deserializes a python dict in to the DAG and operators it contains."""
ver = serialized_obj.get("__version", "<not present>")
if ver != cls.SERIALIZER_VERSION:
raise ValueError(f"Unsure how to deserialize version {ver!r}")
return cls.deserialize_dag(serialized_obj["dag"])
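# Illustrative sketch (hypothetical DAG): to_dict and from_dict round-trip a DAG through the
# versioned JSON document that is stored in the serialized_dag table.
#
#   >>> with DAG(dag_id="example", start_date=pendulum.datetime(2023, 1, 1)) as dag:
#   ...     task = BaseOperator(task_id="noop")
#   >>> payload = SerializedDAG.to_dict(dag)        # {'__version': 1, 'dag': {...}}
#   >>> list(SerializedDAG.from_dict(payload).task_dict)
#   ['noop']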
class TaskGroupSerialization(BaseSerialization):
"""JSON serializable representation of a task group."""
@classmethod
def serialize_task_group(cls, task_group: TaskGroup) -> dict[str, Any] | None:
"""Serializes TaskGroup into a JSON object."""
if not task_group:
return None
# task_group.xxx_ids needs to be sorted here, because task_group.xxx_ids is a set and
# converting a set to a list yields an unstable order.
# Without sorting, json.dumps(self.data, sort_keys=True) could generate a different dag_hash
# for an unchanged DAG.
encoded = {
"_group_id": task_group._group_id,
"prefix_group_id": task_group.prefix_group_id,
"tooltip": task_group.tooltip,
"ui_color": task_group.ui_color,
"ui_fgcolor": task_group.ui_fgcolor,
"children": {
label: child.serialize_for_task_group() for label, child in task_group.children.items()
},
"upstream_group_ids": cls.serialize(sorted(task_group.upstream_group_ids)),
"downstream_group_ids": cls.serialize(sorted(task_group.downstream_group_ids)),
"upstream_task_ids": cls.serialize(sorted(task_group.upstream_task_ids)),
"downstream_task_ids": cls.serialize(sorted(task_group.downstream_task_ids)),
}
if isinstance(task_group, MappedTaskGroup):
expand_input = task_group._expand_input
encoded["expand_input"] = {
"type": get_map_type_key(expand_input),
"value": cls.serialize(expand_input.value),
}
encoded["is_mapped"] = True
return encoded
@classmethod
def deserialize_task_group(
cls,
encoded_group: dict[str, Any],
parent_group: TaskGroup | None,
task_dict: dict[str, Operator],
dag: SerializedDAG,
) -> TaskGroup:
"""Deserializes a TaskGroup from a JSON object."""
group_id = cls.deserialize(encoded_group["_group_id"])
kwargs = {
key: cls.deserialize(encoded_group[key])
for key in ["prefix_group_id", "tooltip", "ui_color", "ui_fgcolor"]
}
if not encoded_group.get("is_mapped"):
group = TaskGroup(group_id=group_id, parent_group=parent_group, dag=dag, **kwargs)
else:
xi = encoded_group["expand_input"]
group = MappedTaskGroup(
group_id=group_id,
parent_group=parent_group,
dag=dag,
expand_input=_ExpandInputRef(xi["type"], cls.deserialize(xi["value"])).deref(dag),
**kwargs,
)
def set_ref(task: Operator) -> Operator:
task.task_group = weakref.proxy(group)
return task
group.children = {
label: set_ref(task_dict[val])
if _type == DAT.OP
else cls.deserialize_task_group(val, group, task_dict, dag=dag)
for label, (_type, val) in encoded_group["children"].items()
}
group.upstream_group_ids.update(cls.deserialize(encoded_group["upstream_group_ids"]))
group.downstream_group_ids.update(cls.deserialize(encoded_group["downstream_group_ids"]))
group.upstream_task_ids.update(cls.deserialize(encoded_group["upstream_task_ids"]))
group.downstream_task_ids.update(cls.deserialize(encoded_group["downstream_task_ids"]))
return group
@dataclass(frozen=True, order=True)
class DagDependency:
"""
Dataclass for representing dependencies between DAGs.
These are calculated during serialization and attached to serialized DAGs.
"""
source: str
target: str
dependency_type: str
dependency_id: str | None = None
@property
def node_id(self):
"""Node ID for graph rendering."""
val = f"{self.dependency_type}"
if not self.dependency_type == "dataset":
val += f":{self.source}:{self.target}"
if self.dependency_id:
val += f":{self.dependency_id}"
return val
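# Illustrative sketch (hypothetical values): dataset nodes collapse onto the dataset URI, while
# trigger/sensor nodes keep the source and target DAG ids in the node id.
#
#   >>> DagDependency("parent", "child", "trigger", "kick_off").node_id
#   'trigger:parent:child:kick_off'
#   >>> DagDependency("dataset", "consumer", "dataset", "s3://bucket/key").node_id
#   'dataset:s3://bucket/key'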
def _has_kubernetes() -> bool:
global HAS_KUBERNETES
if "HAS_KUBERNETES" in globals():
return HAS_KUBERNETES
# Loading kube modules is expensive, so delay it until the last moment
try:
from kubernetes.client import models as k8s
from airflow.kubernetes.pod_generator import PodGenerator
globals()["k8s"] = k8s
globals()["PodGenerator"] = PodGenerator
# isort: on
HAS_KUBERNETES = True
except ImportError:
HAS_KUBERNETES = False
return HAS_KUBERNETES
| 62,329 | 40.692308 | 110 |
py
|
airflow
|
airflow-main/airflow/serialization/enums.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Enums for DAG serialization."""
from __future__ import annotations
from enum import Enum, unique
# Fields of an encoded object in serialization.
@unique
class Encoding(str, Enum):
"""Enum of encoding constants."""
TYPE = "__type"
VAR = "__var"
# Supported types for encoding. primitives and list are not encoded.
@unique
class DagAttributeTypes(str, Enum):
"""Enum of supported attribute types of DAG."""
DAG = "dag"
OP = "operator"
DATETIME = "datetime"
TIMEDELTA = "timedelta"
TIMEZONE = "timezone"
RELATIVEDELTA = "relativedelta"
DICT = "dict"
SET = "set"
TUPLE = "tuple"
POD = "k8s.V1Pod"
TASK_GROUP = "taskgroup"
EDGE_INFO = "edgeinfo"
PARAM = "param"
XCOM_REF = "xcomref"
DATASET = "dataset"
SIMPLE_TASK_INSTANCE = "simple_task_instance"
BASE_JOB = "Job"
TASK_INSTANCE = "task_instance"
DAG_RUN = "dag_run"
DATA_SET = "data_set"
| 1,728 | 28.810345 | 68 |
py
|
airflow
|
airflow-main/airflow/serialization/serde.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import dataclasses
import enum
import functools
import logging
import sys
from importlib import import_module
from types import ModuleType
from typing import Any, Pattern, TypeVar, Union, cast
import attr
import re2
import airflow.serialization.serializers
from airflow.configuration import conf
from airflow.stats import Stats
from airflow.utils.module_loading import import_string, iter_namespace, qualname
log = logging.getLogger(__name__)
MAX_RECURSION_DEPTH = sys.getrecursionlimit() - 1
CLASSNAME = "__classname__"
VERSION = "__version__"
DATA = "__data__"
SCHEMA_ID = "__id__"
CACHE = "__cache__"
OLD_TYPE = "__type"
OLD_SOURCE = "__source"
OLD_DATA = "__var"
OLD_DICT = "dict"
DEFAULT_VERSION = 0
T = TypeVar("T", bool, float, int, dict, list, str, tuple, set)
U = Union[bool, float, int, dict, list, str, tuple, set]
S = Union[list, tuple, set]
_serializers: dict[str, ModuleType] = {}
_deserializers: dict[str, ModuleType] = {}
_stringifiers: dict[str, ModuleType] = {}
_extra_allowed: set[str] = set()
_primitives = (int, bool, float, str)
_builtin_collections = (frozenset, list, set, tuple) # dict is treated specially.
def encode(cls: str, version: int, data: T) -> dict[str, str | int | T]:
"""Encodes o so it can be understood by the deserializer."""
return {CLASSNAME: cls, VERSION: version, DATA: data}
def decode(d: dict[str, Any]) -> tuple[str, int, Any]:
classname = d[CLASSNAME]
version = d[VERSION]
if not isinstance(classname, str) or not isinstance(version, int):
raise ValueError(f"cannot decode {d!r}")
data = d.get(DATA)
return classname, version, data
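# Illustrative sketch (not part of the original module): the envelope that ``encode``
# produces and ``decode`` unpacks; the classname and payload below are assumptions
# chosen purely for demonstration.
#
#   encode("decimal.Decimal", 1, "3.14")
#   # -> {"__classname__": "decimal.Decimal", "__version__": 1, "__data__": "3.14"}
#   decode({"__classname__": "decimal.Decimal", "__version__": 1, "__data__": "3.14"})
#   # -> ("decimal.Decimal", 1, "3.14")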
def serialize(o: object, depth: int = 0) -> U | None:
"""Serialize an object into a representation consisting only built-in types.
Primitives (int, float, bool, str) are returned as-is. Built-in collections
are iterated over, where it is assumed that keys in a dict can be represented
as str.
Values that are not of a built-in type are serialized if a serializer is
found for them. The order in which serializers are used is
1. A ``serialize`` function provided by the object.
2. A registered serializer in the namespace of ``airflow.serialization.serializers``
3. Annotations from attr or dataclass.
Limitations: attr and dataclass objects can lose type information for nested objects
as they do not store this when calling ``asdict``. This means that at deserialization values
will be deserialized as a dict as opposed to reinstating the object. Provide
your own serializer to work around this.
:param o: The object to serialize.
:param depth: Private tracker for nested serialization.
:raise TypeError: A serializer cannot be found.
:raise RecursionError: The object is too nested for the function to handle.
:return: A representation of ``o`` that consists of only built-in types.
"""
if depth == MAX_RECURSION_DEPTH:
raise RecursionError("maximum recursion depth reached for serialization")
# None remains None
if o is None:
return o
# primitive types are returned as is
if isinstance(o, _primitives):
if isinstance(o, enum.Enum):
return o.value
return o
if isinstance(o, list):
return [serialize(d, depth + 1) for d in o]
if isinstance(o, dict):
if CLASSNAME in o or SCHEMA_ID in o:
raise AttributeError(f"reserved key {CLASSNAME} or {SCHEMA_ID} found in dict to serialize")
return {str(k): serialize(v, depth + 1) for k, v in o.items()}
cls = type(o)
qn = qualname(o)
# custom serializers
dct = {
CLASSNAME: qn,
VERSION: getattr(cls, "__version__", DEFAULT_VERSION),
}
# if there is a builtin serializer available use that
if qn in _serializers:
data, classname, version, is_serialized = _serializers[qn].serialize(o)
if is_serialized:
return encode(classname, version, serialize(data, depth + 1))
# object / class brings their own
if hasattr(o, "serialize"):
data = getattr(o, "serialize")()
# if we end up with a structure, ensure its values are serialized
if isinstance(data, dict):
data = serialize(data, depth + 1)
dct[DATA] = data
return dct
# pydantic models are recursive
if _is_pydantic(cls):
data = o.dict() # type: ignore[attr-defined]
dct[DATA] = serialize(data, depth + 1)
return dct
# dataclasses
if dataclasses.is_dataclass(cls):
        # FIXME: unfortunately dataclasses.asdict() loses type information for nested dataclasses
data = dataclasses.asdict(o) # type: ignore[call-overload]
dct[DATA] = serialize(data, depth + 1)
return dct
# attr annotated
if attr.has(cls):
# Only include attributes which we can pass back to the classes constructor
data = attr.asdict(cast(attr.AttrsInstance, o), recurse=True, filter=lambda a, v: a.init)
dct[DATA] = serialize(data, depth + 1)
return dct
raise TypeError(f"cannot serialize object of type {cls}")
def deserialize(o: T | None, full=True, type_hint: Any = None) -> object:
"""
    Deserialize an object composed of primitive types, using an allow list to determine whether a class can be loaded.
:param o: primitive to deserialize into an arbitrary object.
:param full: if False it will return a stringified representation
of an object and will not load any classes
    :param type_hint: if set, it is used to help determine which type to
        deserialize into. It does not override an explicit specification
        found in the serialized data
:return: object
"""
if o is None:
return o
if isinstance(o, _primitives):
return o
# tuples, sets are included here for backwards compatibility
if isinstance(o, _builtin_collections):
col = [deserialize(d) for d in o]
if isinstance(o, tuple):
return tuple(col)
if isinstance(o, set):
return set(col)
return col
if not isinstance(o, dict):
# if o is not a dict, then it's already deserialized
# in this case we should return it as is
return o
o = _convert(o)
# plain dict and no type hint
if CLASSNAME not in o and not type_hint or VERSION not in o:
return {str(k): deserialize(v, full) for k, v in o.items()}
# custom deserialization starts here
cls: Any
version = 0
value: Any = None
classname = ""
if type_hint:
cls = type_hint
classname = qualname(cls)
version = 0 # type hinting always sets version to 0
value = o
if CLASSNAME in o and VERSION in o:
classname, version, value = decode(o)
if not classname:
raise TypeError("classname cannot be empty")
# only return string representation
if not full:
return _stringify(classname, version, value)
if not _match(classname) and classname not in _extra_allowed:
raise ImportError(
f"{classname} was not found in allow list for deserialization imports. "
f"To allow it, add it to allowed_deserialization_classes in the configuration"
)
cls = import_string(classname)
# registered deserializer
if classname in _deserializers:
return _deserializers[classname].deserialize(classname, version, deserialize(value))
# class has deserialization function
if hasattr(cls, "deserialize"):
return getattr(cls, "deserialize")(deserialize(value), version)
# attr or dataclass or pydantic
if attr.has(cls) or dataclasses.is_dataclass(cls) or _is_pydantic(cls):
class_version = getattr(cls, "__version__", 0)
if int(version) > class_version:
            raise TypeError(
                f"serialized version of {classname} is newer than module version "
                f"({version} > {class_version})"
            )
return cls(**deserialize(value))
# no deserializer available
raise TypeError(f"No deserializer found for {classname}")
def _convert(old: dict) -> dict:
"""Converts an old style serialization to new style."""
if OLD_TYPE in old and OLD_DATA in old:
# Return old style dicts directly as they do not need wrapping
if old[OLD_TYPE] == OLD_DICT:
return old[OLD_DATA]
else:
return {CLASSNAME: old[OLD_TYPE], VERSION: DEFAULT_VERSION, DATA: old[OLD_DATA]}
return old
def _match(classname: str) -> bool:
return any(p.match(classname) is not None for p in _get_patterns())
def _stringify(classname: str, version: int, value: T | None) -> str:
"""Convert a previously serialized object in a somewhat human-readable format.
This function is not designed to be exact, and will not extensively traverse
the whole tree of an object.
"""
if classname in _stringifiers:
return _stringifiers[classname].stringify(classname, version, value)
s = f"{classname}@version={version}("
if isinstance(value, _primitives):
s += f"{value})"
elif isinstance(value, _builtin_collections):
# deserialized values can be != str
s += ",".join(str(deserialize(value, full=False)))
elif isinstance(value, dict):
for k, v in value.items():
s += f"{k}={deserialize(v, full=False)},"
s = s[:-1] + ")"
return s
def _is_pydantic(cls: Any) -> bool:
"""Return True if the class is a pydantic model.
Checking is done by attributes as it is significantly faster than
using isinstance.
"""
return hasattr(cls, "__validators__") and hasattr(cls, "__fields__") and hasattr(cls, "dict")
def _register():
"""Register builtin serializers and deserializers for types that don't have any themselves."""
_serializers.clear()
_deserializers.clear()
_stringifiers.clear()
with Stats.timer("serde.load_serializers") as timer:
for _, name, _ in iter_namespace(airflow.serialization.serializers):
name = import_module(name)
for s in getattr(name, "serializers", ()):
if not isinstance(s, str):
s = qualname(s)
if s in _serializers and _serializers[s] != name:
raise AttributeError(f"duplicate {s} for serialization in {name} and {_serializers[s]}")
log.debug("registering %s for serialization", s)
_serializers[s] = name
for d in getattr(name, "deserializers", ()):
if not isinstance(d, str):
d = qualname(d)
if d in _deserializers and _deserializers[d] != name:
raise AttributeError(f"duplicate {d} for deserialization in {name} and {_serializers[d]}")
log.debug("registering %s for deserialization", d)
_deserializers[d] = name
_extra_allowed.add(d)
for c in getattr(name, "stringifiers", ()):
if not isinstance(c, str):
c = qualname(c)
                if c in _stringifiers and _stringifiers[c] != name:
                    raise AttributeError(f"duplicate {c} for stringifiers in {name} and {_stringifiers[c]}")
log.debug("registering %s for stringifying", c)
_stringifiers[c] = name
log.debug("loading serializers took %.3f seconds", timer.duration)
@functools.lru_cache(maxsize=None)
def _get_patterns() -> list[Pattern]:
patterns = conf.get("core", "allowed_deserialization_classes").split()
return [re2.compile(re2.sub(r"(\w)\.", r"\1\..", p)) for p in patterns]
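# Illustrative note (not part of the original module): the allow-list patterns come
# from the space-separated [core] allowed_deserialization_classes setting, e.g.
#   allowed_deserialization_classes = airflow\..* mypackage\.models\..*
# each entry is compiled as a regular expression and _match() accepts a classname if
# any pattern matches it ("mypackage" is an assumed name used only for demonstration).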
_register()
| 12,611 | 33.459016 | 110 |
py
|
airflow
|
airflow-main/airflow/serialization/json_schema.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""jsonschema for validating serialized DAG and operator."""
from __future__ import annotations
import pkgutil
from typing import TYPE_CHECKING, Iterable
from airflow.exceptions import AirflowException
from airflow.settings import json
from airflow.typing_compat import Protocol
if TYPE_CHECKING:
import jsonschema
class Validator(Protocol):
"""
This class is only used for type checking.
    A workaround for IDEs, mypy, etc. due to the way ``Draft7Validator`` instances
    are created: they are built dynamically and do not inherit from a proper class,
    so you cannot annotate a variable as ``type: Draft7Validator``.
"""
def is_valid(self, instance) -> bool:
"""Check if the instance is valid under the current schema."""
...
def validate(self, instance) -> None:
"""Check if the instance is valid under the current schema, raising validation error if not."""
...
def iter_errors(self, instance) -> Iterable[jsonschema.exceptions.ValidationError]:
"""Lazily yield each of the validation errors in the given instance."""
...
def load_dag_schema_dict() -> dict:
"""Load & return Json Schema for DAG as Python dict."""
schema_file_name = "schema.json"
schema_file = pkgutil.get_data(__name__, schema_file_name)
if schema_file is None:
raise AirflowException(f"Schema file {schema_file_name} does not exists")
schema = json.loads(schema_file.decode())
return schema
def load_dag_schema() -> Validator:
"""Load & Validate Json Schema for DAG."""
import jsonschema
schema = load_dag_schema_dict()
return jsonschema.Draft7Validator(schema)
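# Illustrative usage (not part of the original module); ``serialized_dag_dict`` is an
# assumed variable holding a serialized DAG in dict form:
#
#   validator = load_dag_schema()
#   if not validator.is_valid(serialized_dag_dict):
#       for error in validator.iter_errors(serialized_dag_dict):
#           print(error.message)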
| 2,432 | 32.791667 | 103 |
py
|
airflow
|
airflow-main/airflow/serialization/__init__.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""DAG serialization."""
| 812 | 41.789474 | 62 |
py
|
airflow
|
airflow-main/airflow/serialization/helpers.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Serialized DAG and BaseOperator."""
from __future__ import annotations
from typing import Any
from airflow.settings import json
def serialize_template_field(template_field: Any) -> str | dict | list | int | float:
"""Return a serializable representation of the templated field.
    If ``template_field`` contains a class or instance that requires recursive
    templating, store it as a string. Otherwise, simply return the field as-is.
"""
def is_jsonable(x):
try:
json.dumps(x)
return True
except (TypeError, OverflowError):
return False
if not is_jsonable(template_field):
return str(template_field)
else:
return template_field
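# Illustrative sketch (not part of the original module): JSON-serializable values pass
# through unchanged, anything else is stringified; the example values are assumptions.
#
#   serialize_template_field({"retries": 3})   # -> {"retries": 3}
#   serialize_template_field(object())         # -> "<object object at 0x...>"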
| 1,515 | 34.255814 | 85 |
py
|
airflow
|
airflow-main/airflow/serialization/pydantic/taskinstance.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from typing import Any, Iterable, Optional, Union
from pydantic import BaseModel as BaseModelPydantic
from airflow.serialization.pydantic.dag_run import DagRunPydantic
from airflow.utils.xcom import XCOM_RETURN_KEY
class TaskInstancePydantic(BaseModelPydantic):
"""Serializable representation of the TaskInstance ORM SqlAlchemyModel used by internal API."""
task_id: str
dag_id: str
run_id: str
map_index: int
start_date: Optional[datetime]
end_date: Optional[datetime]
execution_date: Optional[datetime]
duration: Optional[float]
state: Optional[str]
try_number: int
max_tries: int
hostname: str
unixname: str
job_id: Optional[int]
pool: str
pool_slots: int
queue: str
priority_weight: Optional[int]
operator: str
queued_dttm: Optional[str]
queued_by_job_id: Optional[int]
pid: Optional[int]
updated_at: Optional[datetime]
external_executor_id: Optional[str]
trigger_id: Optional[int]
trigger_timeout: Optional[datetime]
next_method: Optional[str]
next_kwargs: Optional[dict]
run_as_user: Optional[str]
class Config:
"""Make sure it deals automatically with SQLAlchemy ORM classes."""
orm_mode = True
def xcom_pull(
self,
task_ids: Optional[Union[str, Iterable[str]]] = None,
dag_id: Optional[str] = None,
key: str = XCOM_RETURN_KEY,
include_prior_dates: bool = False,
*,
map_indexes: Optional[Union[int, Iterable[int]]] = None,
default: Any = None,
) -> Any:
"""
Pull an XCom value for this task instance.
        TODO: make it work for AIP-44
:param task_ids: task id or list of task ids, if None, the task_id of the current task is used
:param dag_id: dag id, if None, the dag_id of the current task is used
:param key: the key to identify the XCom value
:param include_prior_dates: whether to include prior execution dates
:param map_indexes: map index or list of map indexes, if None, the map_index of the current task
is used
:param default: the default value to return if the XCom value does not exist
:return: Xcom value
"""
return None
def xcom_push(
self,
key: str,
value: Any,
execution_date: Optional[datetime] = None,
) -> None:
"""
Push an XCom value for this task instance.
        TODO: make it work for AIP-44
:param key: the key to identify the XCom value
:param value: the value of the XCom
:param execution_date: the execution date to push the XCom for
"""
pass
def get_dagrun(self) -> DagRunPydantic:
"""
Get the DagRun for this task instance.
        TODO: make it work for AIP-44
        :return: Pydantic serialized version of DagRun
"""
raise NotImplementedError()
| 3,768 | 31.773913 | 104 |
py
|
airflow
|
airflow-main/airflow/serialization/pydantic/dataset.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from typing import List, Optional
from pydantic import BaseModel as BaseModelPydantic
class DagScheduleDatasetReferencePydantic(BaseModelPydantic):
"""Serializable version of the DagScheduleDatasetReference ORM SqlAlchemyModel used by internal API."""
dataset_id: int
dag_id: str
created_at: datetime
updated_at: datetime
class Config:
"""Make sure it deals automatically with SQLAlchemy ORM classes."""
orm_mode = True
class TaskOutletDatasetReferencePydantic(BaseModelPydantic):
"""Serializable version of the TaskOutletDatasetReference ORM SqlAlchemyModel used by internal API."""
dataset_id: int
dag_id: str
task_id: str
created_at: datetime
updated_at: datetime
class Config:
"""Make sure it deals automatically with SQLAlchemy ORM classes."""
orm_mode = True
class DatasetPydantic(BaseModelPydantic):
"""Serializable representation of the Dataset ORM SqlAlchemyModel used by internal API."""
id: int
uri: str
extra: Optional[dict]
created_at: datetime
updated_at: datetime
is_orphaned: bool
consuming_dags: List[DagScheduleDatasetReferencePydantic]
producing_tasks: List[TaskOutletDatasetReferencePydantic]
class Config:
"""Make sure it deals automatically with SQLAlchemy ORM classes."""
orm_mode = True
class DatasetEventPydantic(BaseModelPydantic):
"""Serializable representation of the DatasetEvent ORM SqlAlchemyModel used by internal API."""
id: int
source_task_id: Optional[str]
source_dag_id: Optional[str]
source_run_id: Optional[str]
extra: Optional[dict]
source_map_index: int
timestamp: datetime
dataset: DatasetPydantic
class Config:
"""Make sure it deals automatically with SQLAlchemy ORM classes."""
orm_mode = True
| 2,670 | 29.701149 | 107 |
py
|
airflow
|
airflow-main/airflow/serialization/pydantic/dag_run.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from typing import List, Optional
from pydantic import BaseModel as BaseModelPydantic
from airflow.serialization.pydantic.dataset import DatasetEventPydantic
class DagRunPydantic(BaseModelPydantic):
"""Serializable representation of the DagRun ORM SqlAlchemyModel used by internal API."""
id: int
dag_id: str
queued_at: Optional[datetime]
execution_date: datetime
start_date: Optional[datetime]
end_date: Optional[datetime]
state: str
run_id: str
creating_job_id: Optional[int]
external_trigger: bool
run_type: str
data_interval_start: Optional[datetime]
data_interval_end: Optional[datetime]
last_scheduling_decision: Optional[datetime]
dag_hash: Optional[str]
updated_at: datetime
consumed_dataset_events: List[DatasetEventPydantic]
class Config:
"""Make sure it deals automatically with SQLAlchemy ORM classes."""
orm_mode = True
| 1,748 | 33.294118 | 93 |
py
|
airflow
|
airflow-main/airflow/serialization/pydantic/__init__.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 |
py
|
airflow
|
airflow-main/airflow/serialization/pydantic/job.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from typing import Optional
from pydantic import BaseModel as BaseModelPydantic
from airflow.jobs.base_job_runner import BaseJobRunner
def check_runner_initialized(job_runner: Optional[BaseJobRunner], job_type: str) -> BaseJobRunner:
if job_runner is None:
raise ValueError(f"In order to run {job_type} you need to initialize the {job_type}Runner first.")
return job_runner
class JobPydantic(BaseModelPydantic):
"""Serializable representation of the Job ORM SqlAlchemyModel used by internal API."""
id: Optional[int]
dag_id: Optional[str]
state: Optional[str]
job_type: Optional[str]
start_date: Optional[datetime]
end_date: Optional[datetime]
latest_heartbeat: datetime
executor_class: Optional[str]
hostname: Optional[str]
unixname: Optional[str]
# not an ORM field
heartrate: Optional[int]
max_tis_per_query: Optional[int]
class Config:
"""Make sure it deals automatically with SQLAlchemy ORM classes."""
orm_mode = True
| 1,841 | 33.754717 | 106 |
py
|
airflow
|
airflow-main/airflow/serialization/serializers/numpy.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Any
from airflow.utils.module_loading import import_string, qualname
# lazy loading for performance reasons
serializers = [
"numpy.int8",
"numpy.int16",
"numpy.int32",
"numpy.int64",
"numpy.uint8",
"numpy.uint16",
"numpy.uint32",
"numpy.uint64",
"numpy.bool_",
"numpy.float64",
"numpy.float16",
"numpy.complex128",
"numpy.complex64",
]
if TYPE_CHECKING:
from airflow.serialization.serde import U
deserializers = serializers
__version__ = 1
def serialize(o: object) -> tuple[U, str, int, bool]:
import numpy as np
if np is None:
return "", "", 0, False
name = qualname(o)
if isinstance(
o,
(
np.int_,
np.intc,
np.intp,
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
),
):
return int(o), name, __version__, True
if isinstance(o, np.bool_):
        return bool(o), name, __version__, True
if isinstance(
o, (np.float_, np.float16, np.float32, np.float64, np.complex_, np.complex64, np.complex128)
):
return float(o), name, __version__, True
return "", "", 0, False
def deserialize(classname: str, version: int, data: str) -> Any:
if version > __version__:
raise TypeError("serialized version is newer than class version")
if classname not in deserializers:
raise TypeError(f"unsupported {classname} found for numpy deserialization")
return import_string(classname)(data)
| 2,485 | 25.731183 | 100 |
py
|
airflow
|
airflow-main/airflow/serialization/serializers/builtin.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, cast
from airflow.utils.module_loading import qualname
if TYPE_CHECKING:
from airflow.serialization.serde import U
__version__ = 1
serializers = ["builtins.frozenset", "builtins.set", "builtins.tuple"]
deserializers = serializers
stringifiers = serializers
def serialize(o: object) -> tuple[U, str, int, bool]:
return list(cast(list, o)), qualname(o), __version__, True
def deserialize(classname: str, version: int, data: list) -> tuple | set | frozenset:
if version > __version__:
raise TypeError("serialized version is newer than class version")
if classname == qualname(tuple):
return tuple(data)
if classname == qualname(set):
return set(data)
if classname == qualname(frozenset):
return frozenset(data)
raise TypeError(f"do not know how to deserialize {classname}")
def stringify(classname: str, version: int, data: list) -> str:
if classname not in stringifiers:
raise TypeError(f"do not know how to stringify {classname}")
s = ",".join(str(d) for d in data)
return f"({s})"
| 1,933 | 31.233333 | 85 |
py
|
airflow
|
airflow-main/airflow/serialization/serializers/timezone.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING
from airflow.utils.module_loading import qualname
if TYPE_CHECKING:
from pendulum.tz.timezone import Timezone
from airflow.serialization.serde import U
serializers = ["pendulum.tz.timezone.FixedTimezone", "pendulum.tz.timezone.Timezone"]
deserializers = serializers
__version__ = 1
def serialize(o: object) -> tuple[U, str, int, bool]:
"""Encode a Pendulum Timezone for serialization.
    Airflow only supports timezone objects that implement Pendulum's Timezone
interface. We try to keep as much information as possible to make conversion
round-tripping possible (see ``decode_timezone``). We need to special-case
UTC; Pendulum implements it as a FixedTimezone (i.e. it gets encoded as
0 without the special case), but passing 0 into ``pendulum.timezone`` does
not give us UTC (but ``+00:00``).
"""
from pendulum.tz.timezone import FixedTimezone, Timezone
name = qualname(o)
if isinstance(o, FixedTimezone):
if o.offset == 0:
return "UTC", name, __version__, True
return o.offset, name, __version__, True
if isinstance(o, Timezone):
return o.name, name, __version__, True
return "", "", 0, False
def deserialize(classname: str, version: int, data: object) -> Timezone:
from pendulum.tz import fixed_timezone, timezone
if not isinstance(data, (str, int)):
raise TypeError(f"{data} is not of type int or str but of {type(data)}")
if version > __version__:
raise TypeError(f"serialized {version} of {classname} > {__version__}")
if isinstance(data, int):
return fixed_timezone(data)
return timezone(data)
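# Illustrative round trip (not part of the original module); exact classnames depend on
# the installed pendulum version:
#
#   import pendulum
#   serialize(pendulum.timezone("Europe/Paris"))
#   # -> ("Europe/Paris", "pendulum.tz.timezone.Timezone", 1, True)
#   deserialize("pendulum.tz.timezone.Timezone", 1, "Europe/Paris")   # named timezone
#   deserialize("pendulum.tz.timezone.FixedTimezone", 1, 3600)        # fixed +01:00 offset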
| 2,514 | 33.452055 | 85 |
py
|
airflow
|
airflow-main/airflow/serialization/serializers/datetime.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING
from airflow.utils.module_loading import qualname
from airflow.utils.timezone import convert_to_utc, is_naive
if TYPE_CHECKING:
import datetime
from airflow.serialization.serde import U
__version__ = 1
serializers = ["datetime.date", "datetime.datetime", "datetime.timedelta", "pendulum.datetime.DateTime"]
deserializers = serializers
TIMESTAMP = "timestamp"
TIMEZONE = "tz"
def serialize(o: object) -> tuple[U, str, int, bool]:
from datetime import date, datetime, timedelta
if isinstance(o, datetime):
qn = qualname(o)
if is_naive(o):
o = convert_to_utc(o)
tz = o.tzname()
return {TIMESTAMP: o.timestamp(), TIMEZONE: tz}, qn, __version__, True
if isinstance(o, date):
return o.isoformat(), qualname(o), __version__, True
if isinstance(o, timedelta):
return o.total_seconds(), qualname(o), __version__, True
return "", "", 0, False
def deserialize(classname: str, version: int, data: dict | str) -> datetime.date | datetime.timedelta:
import datetime
from pendulum import DateTime
from pendulum.tz import timezone
if classname == qualname(datetime.datetime) and isinstance(data, dict):
return datetime.datetime.fromtimestamp(float(data[TIMESTAMP]), tz=timezone(data[TIMEZONE]))
if classname == qualname(DateTime) and isinstance(data, dict):
return DateTime.fromtimestamp(float(data[TIMESTAMP]), tz=timezone(data[TIMEZONE]))
if classname == qualname(datetime.timedelta) and isinstance(data, (str, float)):
return datetime.timedelta(seconds=float(data))
if classname == qualname(datetime.date) and isinstance(data, str):
return datetime.date.fromisoformat(data)
raise TypeError(f"unknown date/time format {classname}")
| 2,648 | 32.531646 | 104 |
py
|
airflow
|
airflow-main/airflow/serialization/serializers/pandas.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING
from airflow.utils.module_loading import qualname
# lazy loading for performance reasons
serializers = [
"pandas.core.frame.DataFrame",
]
deserializers = serializers
if TYPE_CHECKING:
from pandas import DataFrame
from airflow.serialization.serde import U
__version__ = 1
def serialize(o: object) -> tuple[U, str, int, bool]:
import pyarrow as pa
from pandas import DataFrame
from pyarrow import parquet as pq
if not isinstance(o, DataFrame):
return "", "", 0, False
    # for now, we *always* serialize in memory until we have
    # a generic backend that manages sinks
table = pa.Table.from_pandas(o)
buf = pa.BufferOutputStream()
pq.write_table(table, buf, compression="snappy")
return buf.getvalue().hex().decode("utf-8"), qualname(o), __version__, True
def deserialize(classname: str, version: int, data: object) -> DataFrame:
if version > __version__:
raise TypeError(f"serialized {version} of {classname} > {__version__}")
import io
from pyarrow import parquet as pq
if not isinstance(data, str):
raise TypeError(f"serialized {classname} has wrong data type {type(data)}")
buf = io.BytesIO(bytes.fromhex(data))
df = pq.read_table(buf).to_pandas()
return df
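# Illustrative round trip (not part of the original module); requires pandas and pyarrow:
#
#   import pandas as pd
#   df = pd.DataFrame({"a": [1, 2]})
#   data, classname, version, ok = serialize(df)      # data is a hex-encoded parquet blob
#   deserialize(classname, version, data).equals(df)  # -> True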
| 2,143 | 29.197183 | 83 |
py
|
airflow
|
airflow-main/airflow/serialization/serializers/bignum.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING
from airflow.utils.module_loading import qualname
if TYPE_CHECKING:
import decimal
from airflow.serialization.serde import U
serializers = ["decimal.Decimal"]
deserializers = serializers
__version__ = 1
def serialize(o: object) -> tuple[U, str, int, bool]:
from decimal import Decimal
if not isinstance(o, Decimal):
return "", "", 0, False
name = qualname(o)
_, _, exponent = o.as_tuple()
if isinstance(exponent, int) and exponent >= 0: # No digits after the decimal point.
return int(o), name, __version__, True
# Technically lossy due to floating point errors, but the best we
# can do without implementing a custom encode function.
return float(o), name, __version__, True
def deserialize(classname: str, version: int, data: object) -> decimal.Decimal:
from decimal import Decimal
if version > __version__:
raise TypeError(f"serialized {version} of {classname} > {__version__}")
if classname != qualname(Decimal):
raise TypeError(f"{classname} != {qualname(Decimal)}")
return Decimal(str(data))
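# Illustrative round trip (not part of the original module):
#
#   from decimal import Decimal
#   serialize(Decimal("42"))                 # -> (42, "decimal.Decimal", 1, True), integer path
#   serialize(Decimal("0.1"))                # -> (0.1, "decimal.Decimal", 1, True), lossy float path
#   deserialize("decimal.Decimal", 1, 0.1)   # -> Decimal("0.1")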
| 1,957 | 31.633333 | 89 |
py
|
airflow
|
airflow-main/airflow/serialization/serializers/kubernetes.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import logging
from typing import TYPE_CHECKING
from airflow.utils.module_loading import qualname
# lazy loading for performance reasons
serializers = [
"kubernetes.client.models.v1_resource_requirements.V1ResourceRequirements",
"kubernetes.client.models.v1_pod.V1Pod",
]
if TYPE_CHECKING:
from airflow.serialization.serde import U
__version__ = 1
deserializers: list[type[object]] = []
log = logging.getLogger(__name__)
def serialize(o: object) -> tuple[U, str, int, bool]:
from kubernetes.client import models as k8s
if not k8s:
return "", "", 0, False
if isinstance(o, (k8s.V1Pod, k8s.V1ResourceRequirements)):
from airflow.kubernetes.pod_generator import PodGenerator
        # safe_get_name below is called from the except handler, so it must not fail
        # under any circumstances, e.g. when accessing a non-existing attribute.
def safe_get_name(pod):
try:
return pod.metadata.name
except Exception:
return None
try:
return PodGenerator.serialize_pod(o), qualname(o), __version__, True
except Exception:
log.warning("Serialization failed for pod %s", safe_get_name(o))
log.debug("traceback for serialization error", exc_info=True)
return "", "", 0, False
return "", "", 0, False
| 2,183 | 32.6 | 80 |
py
|
airflow
|
airflow-main/airflow/serialization/serializers/__init__.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 |
py
|
CLIP2Scene
|
CLIP2Scene-main/downstream.py
|
import os
import gc
import argparse
import MinkowskiEngine as ME
import pytorch_lightning as pl
from downstream.evaluate import evaluate
from utils.read_config import generate_config
from downstream.model_builder import make_model
from pytorch_lightning.plugins import DDPPlugin
from downstream.lightning_trainer import LightningDownstream
from downstream.lightning_datamodule import DownstreamDataModule
from downstream.dataloader_kitti import make_data_loader as make_data_loader_kitti
from downstream.dataloader_nuscenes import make_data_loader as make_data_loader_nuscenes
from downstream.dataloader_scannet import make_data_loader as make_data_loader_scannet
def main():
"""
Code for launching the downstream training
"""
parser = argparse.ArgumentParser(description="arg parser")
parser.add_argument(
"--cfg_file", type=str, default="config/semseg_nuscenes.yaml", help="specify the config for training"
)
parser.add_argument(
"--resume_path", type=str, default=None, help="provide a path to resume an incomplete training"
)
parser.add_argument(
"--pretraining_path", type=str, default=None, help="provide a path to pre-trained weights"
)
args = parser.parse_args()
config = generate_config(args.cfg_file)
if args.resume_path:
config['resume_path'] = args.resume_path
if args.pretraining_path:
config['pretraining_path'] = args.pretraining_path
if os.environ.get("LOCAL_RANK", 0) == 0:
print(
"\n" + "\n".join(list(map(lambda x: f"{x[0]:20}: {x[1]}", config.items())))
)
dm = DownstreamDataModule(config)
model = make_model(config, config["pretraining_path"])
if config["num_gpus"] > 1:
model = ME.MinkowskiSyncBatchNorm.convert_sync_batchnorm(model)
module = LightningDownstream(model, config)
path = os.path.join(config["working_dir"], config["datetime"])
trainer = pl.Trainer(
gpus=config["num_gpus"],
accelerator="ddp",
default_root_dir=path,
checkpoint_callback=True,
max_epochs=config["num_epochs"],
plugins=DDPPlugin(find_unused_parameters=True),
num_sanity_val_steps=0,
resume_from_checkpoint=config["resume_path"],
check_val_every_n_epoch=10,
)
print("Starting the training")
trainer.fit(module, dm)
print("Training finished, now evaluating the results")
del trainer
del dm
del module
gc.collect()
if config["dataset"].lower() == "nuscenes":
phase = "verifying" if config['training'] in ("parametrize", "parametrizing") else "val"
val_dataloader = make_data_loader_nuscenes(
config, phase, num_threads=config["num_threads"]
)
elif config["dataset"].lower() == "kitti":
val_dataloader = make_data_loader_kitti(
config, "val", num_threads=config["num_threads"]
)
elif config["dataset"].lower() == "scannet":
val_dataloader = make_data_loader_scannet(
config, "val", num_threads=config["num_threads"]
)
evaluate(model.to(0), val_dataloader, config)
if __name__ == "__main__":
main()
| 3,175 | 36.809524 | 109 |
py
|
CLIP2Scene
|
CLIP2Scene-main/pretrain.py
|
import os
import argparse
import torch.nn as nn
# import MinkowskiEngine as ME
import pytorch_lightning as pl
from utils.read_config import generate_config
from pretrain.model_builder import make_model
from pytorch_lightning.plugins import DDPPlugin
from pretrain.lightning_trainer import LightningPretrain
from pretrain.lightning_datamodule import PretrainDataModule
from pretrain.lightning_trainer_spconv import LightningPretrainSpconv
def main():
"""
Code for launching the pretraining
"""
parser = argparse.ArgumentParser(description="arg parser")
parser.add_argument(
"--cfg_file", type=str, default="config/slidr_minkunet.yaml", help="specify the config for training"
)
parser.add_argument(
"--resume_path", type=str, default=None, help="provide a path to resume an incomplete training"
)
args = parser.parse_args()
config = generate_config(args.cfg_file)
if args.resume_path:
config['resume_path'] = args.resume_path
if os.environ.get("LOCAL_RANK", 0) == 0:
print(
"\n" + "\n".join(list(map(lambda x: f"{x[0]:20}: {x[1]}", config.items())))
)
dm = PretrainDataModule(config)
model_points, model_images, model_fusion = make_model(config)
if config["num_gpus"] > 1:
# model_points = ME.MinkowskiSyncBatchNorm.convert_sync_batchnorm(model_points)
model_images = nn.SyncBatchNorm.convert_sync_batchnorm(model_images)
model_points = model_points #nn.SyncBatchNorm.convert_sync_batchnorm(model_points)
model_fusion = nn.SyncBatchNorm.convert_sync_batchnorm(model_fusion)
if config["model_points"] == "minkunet":
module = LightningPretrain(model_points, model_images, model_fusion, config)
elif config["model_points"] == "voxelnet":
module = LightningPretrainSpconv(model_points, model_images, config)
path = os.path.join(config["working_dir"], config["datetime"])
trainer = pl.Trainer(
gpus=config["num_gpus"],
accelerator="ddp",
default_root_dir=path,
checkpoint_callback=True,
max_epochs=config["num_epochs"],
plugins=DDPPlugin(find_unused_parameters=True),
num_sanity_val_steps=0,
resume_from_checkpoint=config["resume_path"],
check_val_every_n_epoch=10,
)
print("Starting the training")
trainer.fit(module, dm)
if __name__ == "__main__":
main()
| 2,421 | 36.261538 | 108 |
py
|
CLIP2Scene
|
CLIP2Scene-main/evaluate.py
|
import torch
import argparse
from downstream.evaluate import evaluate
from utils.read_config import generate_config
from downstream.model_builder import make_model
from downstream.dataloader_kitti import make_data_loader as make_data_loader_kitti
from downstream.dataloader_nuscenes import make_data_loader as make_data_loader_nuscenes
def main():
"""
Code for launching the downstream evaluation
"""
parser = argparse.ArgumentParser(description="arg parser")
parser.add_argument(
"--cfg_file", type=str, default=None, help="specify the config for training"
)
parser.add_argument(
"--resume_path", type=str, default=None, help="provide a path to resume an incomplete training"
)
parser.add_argument(
"--dataset", type=str, default=None, help="Choose between nuScenes and KITTI"
)
args = parser.parse_args()
if args.cfg_file is None and args.dataset is not None:
if args.dataset.lower() == "kitti":
args.cfg_file = "config/semseg_kitti.yaml"
elif args.dataset.lower() == "nuscenes":
args.cfg_file = "config/semseg_nuscenes.yaml"
else:
raise Exception(f"Dataset not recognized: {args.dataset}")
elif args.cfg_file is None:
args.cfg_file = "config/semseg_nuscenes.yaml"
config = generate_config(args.cfg_file)
if args.resume_path:
config['resume_path'] = args.resume_path
print("\n" + "\n".join(list(map(lambda x: f"{x[0]:20}: {x[1]}", config.items()))))
print("Creating the loaders")
if config["dataset"].lower() == "nuscenes":
phase = "verifying" if config['training'] in ("parametrize", "parametrizing") else "val"
val_dataloader = make_data_loader_nuscenes(
config, phase, num_threads=config["num_threads"]
)
elif config["dataset"].lower() == "kitti":
val_dataloader = make_data_loader_kitti(
config, "val", num_threads=config["num_threads"]
)
else:
raise Exception(f"Dataset not recognized: {args.dataset}")
print("Creating the model")
model = make_model(config, config["pretraining_path"]).to(0)
checkpoint = torch.load(config["resume_path"], map_location=torch.device(0))
if "config" in checkpoint:
for cfg in ("voxel_size", "cylindrical_coordinates"):
assert checkpoint["config"][cfg] == config[cfg], (
f"{cfg} is not consistant.\n"
f"Checkpoint: {checkpoint['config'][cfg]}\n"
f"Config: {config[cfg]}."
)
try:
model.load_state_dict(checkpoint["model_points"])
except KeyError:
weights = {
k.replace("model.", ""): v
for k, v in checkpoint["state_dict"].items()
if k.startswith("model.")
}
model.load_state_dict(weights)
evaluate(model, val_dataloader, config)
if __name__ == "__main__":
main()
| 2,938 | 37.671053 | 103 |
py
|
CLIP2Scene
|
CLIP2Scene-main/superpixel_segmenter.py
|
import os
import argparse
import numpy as np
from PIL import Image
from tqdm import tqdm
from multiprocessing import Pool
from skimage.segmentation import slic
from nuscenes.nuscenes import NuScenes
def compute_slic(cam_token):
cam = nusc.get("sample_data", cam_token)
im = Image.open(os.path.join(nusc.dataroot, cam["filename"]))
segments_slic = slic(
im, n_segments=150, compactness=6, sigma=3.0, start_label=0
).astype(np.uint8)
im = Image.fromarray(segments_slic)
im.save(
"./superpixels/nuscenes/superpixels_slic/" + cam["token"] + ".png"
)
def compute_slic_30(cam_token):
cam = nusc.get("sample_data", cam_token)
im = Image.open(os.path.join(nusc.dataroot, cam["filename"]))
segments_slic = slic(
im, n_segments=30, compactness=6, sigma=3.0, start_label=0
).astype(np.uint8)
im = Image.fromarray(segments_slic)
im.save(
"./superpixels/nuscenes/superpixels_slic_30/" + cam["token"] + ".png"
)
if __name__ == "__main__":
nuscenes_path = "datasets/nuscenes"
parser = argparse.ArgumentParser(description="arg parser")
parser.add_argument(
"--model", type=str, default="minkunet", help="specify the model targeted, either minkunet or voxelnet"
)
assert os.path.exists(nuscenes_path), f"nuScenes not found in {nuscenes_path}"
args = parser.parse_args()
assert args.model in ["minkunet", "voxelnet"]
nusc = NuScenes(
version="v1.0-trainval", dataroot=nuscenes_path, verbose=False
)
os.makedirs("superpixels/nuscenes/superpixels_slic/")
camera_list = [
"CAM_FRONT",
"CAM_FRONT_RIGHT",
"CAM_BACK_RIGHT",
"CAM_BACK",
"CAM_BACK_LEFT",
"CAM_FRONT_LEFT",
]
with Pool(6) as p:
for scene_idx in tqdm(range(len(nusc.scene))):
scene = nusc.scene[scene_idx]
current_sample_token = scene["first_sample_token"]
while current_sample_token != "":
current_sample = nusc.get("sample", current_sample_token)
if args.model == "minkunet":
func = compute_slic
elif args.model == "voxelnet":
func = compute_slic_30
p.map(
func,
[
current_sample["data"][camera_name]
for camera_name in camera_list
],
)
current_sample_token = current_sample["next"]
| 2,523 | 33.108108 | 111 |
py
|
CLIP2Scene
|
CLIP2Scene-main/pretrain/dataloader_scannet.py
|
import os
import copy
import torch
import numpy as np
from PIL import Image
import MinkowskiEngine as ME
from torch.utils.data import Dataset
# import pc_utils
from plyfile import PlyData, PlyElement
import math
# from pc_utils import write_ply_rgb
import sys
sys.path.append("..")
# from MinkowskiEngine.utils import sparse_quantize
import imageio
import cv2
import random
def write_ply_rgb(points, colors, filename, text=True):
""" input: Nx3, Nx3 write points and colors to filename as PLY format. """
num_points = len(points)
assert len(colors) == num_points
points = [(points[i, 0], points[i, 1], points[i, 2]) for i in range(points.shape[0])]
colors = [(colors[i, 0], colors[i, 1], colors[i, 2]) for i in range(colors.shape[0])]
vertex = np.array(points, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
color = np.array(colors, dtype=[('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])
vertex_all = np.empty(num_points, vertex.dtype.descr + color.dtype.descr)
for prop in vertex.dtype.names:
vertex_all[prop] = vertex[prop]
for prop in color.dtype.names:
vertex_all[prop] = color[prop]
el = PlyElement.describe(vertex_all, 'vertex', comments=['vertices'])
PlyData([el], text=text).write(filename)
def scannet_collate_pair_fn(batch):
(
coords,
feats,
labels,
imgs,
pairing_points,
pairing_images,
inverse_indexes,
scan_names,
) = list(zip(*batch))
offset_point = 0
offset_image = 0
for batch_id in range(len(coords)):
pairing_points[batch_id][:] += offset_point
offset_point += coords[batch_id].shape[0]
pairing_images[batch_id][:, 0] += offset_image
offset_image += imgs[batch_id].shape[0]
coords = ME.utils.batched_coordinates(coords, dtype=torch.float32)
feats = torch.cat(feats, dim=0)
imgs = torch.cat(imgs, dim=0)
pairing_points = torch.cat(pairing_points, dim=0)
pairing_images = torch.cat(pairing_images, dim=0)
return {
"sinput_C": coords,
"sinput_F": feats,
"input_I": imgs,
"pairing_points": pairing_points,
"pairing_images": pairing_images,
"inverse_indexes": inverse_indexes,
}
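# Clarifying note (added, not in the original file): the per-sample point clouds and
# image stacks are concatenated along the batch dimension, so the pairing indices must
# be shifted accordingly. With hypothetical sizes, if sample 0 has 1000 voxels and 4
# images, the pairing_points of sample 1 are offset by 1000 and the first column of its
# pairing_images (the image index) is offset by 4.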
class scannet_Dataset(Dataset):
def __init__(self, phase, config, shuffle = True, cloud_transforms = None, mixed_transforms = None):
self.scannet_root_dir = config['dataRoot_scannet']
if phase == 'train':
self.scannet_file_list = self.read_files(config['train_file'])
else:
self.scannet_file_list = self.read_files(config['val_file'])
self.mixed_transforms = mixed_transforms
self.voxel_size = config['voxel_size']
self.phase = phase
self.config = config
self.imageDim = (640, 480)
# self.imageDim = (224, 416)
self.cloud_transforms = cloud_transforms
self.maxImages = 8
def read_files(self, file):
f = open(file)
lines = f.readlines()
name_list = [line.split('.')[0] for line in lines]
f.close()
return name_list
def __len__(self):
return len(self.scannet_file_list)
def read_pose_file(self, fname):
posemat = np.asarray([[float(x[0]), float(x[1]), float(x[2]), float(x[3])] for x in
(x.split(" ") for x in open(fname).read().splitlines())])
return posemat
def read_intrinsic_file(self, fname):
intrinsic = np.asarray([[float(x[0]), float(x[1]), float(x[2]), float(x[3])] for x in
(x.split(" ") for x in open(fname).read().splitlines())])
return intrinsic
def read_txt(self, path):
# Read txt file into lines.
with open(path) as f:
lines = f.readlines()
lines = [x.strip() for x in lines]
return lines
def computeLinking(self, camera_to_world, coords, depth, link_proj_threshold, intrinsic_color, intrinsic_depth, imageDim):
"""
:param camera_to_world: 4 x 4
:param coords: N x 3 format
:param depth: H x W format
        :param intrinsic_depth: 4 x 4
        :param intrinsic_color: 4 x 4, not used currently
:return: linking, N x 3 format, (H,W,mask)
"""
# print("imageDim ", imageDim)
intrinsic = intrinsic_depth
link = np.zeros((3, coords.shape[0]), dtype=float)
coordsNew = np.concatenate([coords, np.ones([coords.shape[0], 1])], axis=1).T #4 x N
assert coordsNew.shape[0] == 4, "[!] Shape error"
world_to_camera = np.linalg.inv(camera_to_world) # 4 x 4
p = np.matmul(world_to_camera, coordsNew) # 4 x N
p[0] = (p[0] * intrinsic[0][0]) / p[2] + intrinsic[0][2]
p[1] = (p[1] * intrinsic[1][1]) / p[2] + intrinsic[1][2]
pi = p
inside_mask = (pi[0] >= 0) * (pi[1] >= 0) * (pi[0] <= imageDim[1] - 1) * (pi[1] <= imageDim[0]-1)
occlusion_mask = np.abs(depth[np.round(pi[1][inside_mask]).astype(np.int64), np.round(pi[0][inside_mask]).astype(np.int64)] - p[2][inside_mask]) <= link_proj_threshold
inside_mask[inside_mask] = occlusion_mask
link[0][inside_mask] = pi[1][inside_mask]
link[1][inside_mask] = pi[0][inside_mask]
link[2][inside_mask] = 1
return link.T
def __getitem__(self, idx):
path = os.path.join(self.scannet_root_dir, self.scannet_file_list[idx], self.scannet_file_list[idx]+"_new_semantic.npy")
data = torch.from_numpy(np.load(path))
coords, feats, labels = data[:, :3], data[:, 3: 6], data[:, 9:]
sceneName = self.scannet_file_list[idx]
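# map the raw RGB colours from [0, 255] to [-1, 1]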
feats = feats / 127.5 - 1
frame_names = []
imgs = []
links = []
intrinsic_color = self.read_intrinsic_file(os.path.join(self.config['dataRoot_images'], sceneName, 'intrinsics_color.txt'))
intrinsic_depth = self.read_intrinsic_file(os.path.join(self.config['dataRoot_images'], sceneName, 'intrinsics_depth.txt'))
for framename in os.listdir(os.path.join(self.config['dataRoot_images'], sceneName, 'color')):
frame_names.append(framename.split('.')[0])
pairing_points = []
pairing_images = []
frame_names = random.sample(frame_names, min(self.maxImages, len(frame_names)))
for i, frameid in enumerate(frame_names):
f = os.path.join(self.config['dataRoot_images'], sceneName, 'color', frameid + '.jpg')
img = imageio.imread(f) / 255
img = cv2.resize(img, self.imageDim)
depth = imageio.imread(f.replace('color', 'depth').replace('.jpg', '.png')) / 1000.0 # convert to meter
posePath = f.replace('color', 'pose').replace('.jpg', '.txt')
pose = self.read_pose_file(posePath)
link = self.computeLinking(pose, coords, depth, 0.05, intrinsic_color, intrinsic_depth, depth.shape)
pairing_point = torch.from_numpy(np.argwhere(link[:, 2] == 1)).squeeze()
pairing_points.append(pairing_point)
link = torch.from_numpy(link).int()
imgs.append(torch.from_numpy(img.transpose((2, 0, 1))))
pairing_image = link[pairing_point, :2]
pairing_images.append(torch.cat((torch.ones(pairing_point.shape[0], 1) * i,
pairing_image), dim=1))
imgs = torch.stack(imgs)
pairing_points = torch.cat(pairing_points, dim=0).numpy()
pairing_images = torch.cat(pairing_images, dim=0).numpy()
if self.cloud_transforms:
coords = self.cloud_transforms(coords.float())
if self.mixed_transforms:
(
coords_b,
feats_b,
imgs_b,
pairing_points_b,
pairing_images_b,
) = self.mixed_transforms(
coords, feats, imgs, pairing_points, pairing_images
)
coords, feats, imgs, pairing_points, pairing_images = coords_b, feats_b, imgs_b, torch.from_numpy(pairing_points_b),\
torch.from_numpy(pairing_images_b)
coords = (coords - coords.mean(0)) / self.voxel_size
discrete_coords, indexes, inverse_indexes = ME.utils.sparse_quantize(
coords.contiguous(), return_index=True, return_inverse=True
)
# indexes here are the indexes of points kept after the voxelization
pairing_points = inverse_indexes[pairing_points]
feats = feats[indexes]
assert pairing_points.shape[0] == pairing_images.shape[0]
packages = (discrete_coords, feats, labels, imgs, pairing_points, pairing_images, inverse_indexes, self.scannet_file_list[idx])
return packages
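# Minimal usage sketch (assumes a config dict providing the keys read above, e.g.
# 'dataRoot_scannet', 'train_file', 'val_file', 'voxel_size', 'dataRoot_images'):
# dataset = scannet_Dataset('train', config)
# loader = torch.utils.data.DataLoader(dataset, batch_size=2, collate_fn=scannet_collate_pair_fn)
# batch = next(iter(loader)) # dict with "sinput_C", "sinput_F", "input_I" and the pairings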
| 8,764 | 35.67364 | 171 |
py
|
CLIP2Scene
|
CLIP2Scene-main/pretrain/lightning_datamodule.py
|
import torch
import numpy as np
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from pretrain.dataloader_nuscenes import (
NuScenesMatchDataset,
minkunet_collate_pair_fn,
)
from pretrain.dataloader_kitti import (
KittiMatchDataset,
kitti_collate_pair_fn,
)
from pretrain.dataloader_scannet import (
scannet_Dataset,
scannet_collate_pair_fn,
)
# try:
# from pretrain.dataloader_scannet import (
# scannet_Dataset,
# scannet_collate_pair_fn,
# )
# except ImportError:
# scannet_Dataset = None
# scannet_collate_pair_fn = None
try:
from pretrain.dataloader_nuscenes_spconv import NuScenesMatchDatasetSpconv, spconv_collate_pair_fn
except ImportError:
NuScenesMatchDatasetSpconv = None
spconv_collate_pair_fn = None
from utils.transforms import (
make_transforms_clouds,
make_transforms_asymmetrical,
make_transforms_asymmetrical_val,
)
class PretrainDataModule(pl.LightningDataModule):
def __init__(self, config):
super().__init__()
self.config = config
if config["num_gpus"]:
self.batch_size = config["batch_size"] // config["num_gpus"]
else:
self.batch_size = config["batch_size"]
def setup(self, stage):
cloud_transforms_train = make_transforms_clouds(self.config)
mixed_transforms_train = make_transforms_asymmetrical(self.config)
cloud_transforms_val = None
mixed_transforms_val = make_transforms_asymmetrical_val(self.config)
if self.config["dataset"].lower() == "nuscenes" and self.config["model_points"] == "minkunet":
Dataset = NuScenesMatchDataset
elif self.config["dataset"].lower() == "kitti":
Dataset = KittiMatchDataset
elif self.config["dataset"].lower() == "scannet":
Dataset = scannet_Dataset
elif self.config["dataset"].lower() == "nuscenes" and self.config["model_points"] == "voxelnet":
Dataset = NuScenesMatchDatasetSpconv
else:
raise Exception("Dataset Unknown")
# print(self.config["dataset"].lower())
# print(type(Dataset))
if self.config["training"] in ("parametrize", "parametrizing"):
phase_train = "parametrizing"
phase_val = "verifying"
else:
phase_train = "train"
phase_val = "val"
self.train_dataset = Dataset(
phase=phase_train,
config=self.config,
shuffle=True,
cloud_transforms=cloud_transforms_train,
mixed_transforms=mixed_transforms_train,
)
print("Dataset Loaded")
print("training size: ", len(self.train_dataset))
if self.config["dataset"].lower() == "nuscenes":
self.val_dataset = Dataset(
phase=phase_val,
shuffle=False,
cloud_transforms=cloud_transforms_val,
mixed_transforms=mixed_transforms_val,
config=self.config,
cached_nuscenes=self.train_dataset.nusc,
# cached_nuscenes=None,
)
else:
self.val_dataset = Dataset(
phase=phase_val,
shuffle=False,
cloud_transforms=cloud_transforms_val,
mixed_transforms=mixed_transforms_val,
config=self.config,
# cached_nuscenes=self.train_dataset.nusc,
# cached_nuscenes=None,
)
print("validation size: ", len(self.val_dataset))
def train_dataloader(self):
if self.config["num_gpus"]:
num_workers = self.config["num_threads"] // self.config["num_gpus"]
else:
num_workers = self.config["num_threads"]
if self.config["dataset"].lower() == "nuscenes":
default_collate_pair_fn = minkunet_collate_pair_fn
elif self.config["dataset"].lower() == "kitti":
default_collate_pair_fn = kitti_collate_pair_fn
elif self.config["dataset"].lower() == "scannet":
default_collate_pair_fn = scannet_collate_pair_fn
return DataLoader(
self.train_dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=num_workers,
collate_fn=default_collate_pair_fn,
pin_memory=True,
drop_last=True,
worker_init_fn=lambda id: np.random.seed(
torch.initial_seed() // 2 ** 32 + id
),
)
def val_dataloader(self):
if self.config["num_gpus"]:
num_workers = self.config["num_threads"] // self.config["num_gpus"]
else:
num_workers = self.config["num_threads"]
if self.config["dataset"].lower() == "nuscenes":
default_collate_pair_fn = minkunet_collate_pair_fn
elif self.config["dataset"].lower() == "kitti":
default_collate_pair_fn = kitti_collate_pair_fn
elif self.config["dataset"].lower() == "scannet":
default_collate_pair_fn = scannet_collate_pair_fn
return DataLoader(
self.val_dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=num_workers,
collate_fn=default_collate_pair_fn,
pin_memory=True,
drop_last=False,
worker_init_fn=lambda id: np.random.seed(
torch.initial_seed() // 2 ** 32 + id
),
)
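# Minimal usage sketch (assumes a config dict with the keys consumed above, e.g.
# "dataset", "batch_size", "num_gpus", "num_threads", "model_points"):
# dm = PretrainDataModule(config)
# dm.setup(stage="fit")
# train_loader = dm.train_dataloader()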
| 5,540 | 33.203704 | 104 |
py
|
CLIP2Scene
|
CLIP2Scene-main/pretrain/lightning_trainer.py
|
import os
import re
import torch
import numpy as np
import torch.optim as optim
import MinkowskiEngine as ME
import pytorch_lightning as pl
from utils.chamfer_distance import ComputeCDLoss
from pretrain.criterion import NCELoss, DistillKL, semantic_NCELoss
from pytorch_lightning.utilities import rank_zero_only
from torchsparse import SparseTensor as spvcnn_SparseTensor
from torch import nn
import torch.nn.functional as F
import random
import numba as nb
@nb.jit()
def nb_pack(counts):
return [np.array(list(range(i))) for i in counts]
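# nb_pack turns per-voxel point counts into per-point slot indices, e.g.
# nb_pack(np.array([3, 2])) -> [array([0, 1, 2]), array([0, 1])]. It is used below to give
# every point a position inside the fixed-size feature package of its merged voxel.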
class LightningPretrain(pl.LightningModule):
def __init__(self, model_points, model_images, model_fusion, config):
super().__init__()
self.model_points = model_points
self.model_images = model_images
self.model_fusion = model_fusion
self._config = config
self.losses = config["losses"]
self.train_losses = []
self.val_losses = []
self.num_matches = config["num_matches"]
self.batch_size = config["batch_size"]
self.num_epochs = config["num_epochs"]
self.superpixel_size = config["superpixel_size"]
self.epoch = 0
self.cot = 0
self.CE = nn.CrossEntropyLoss()
self.CD_loss = ComputeCDLoss()
self.KLloss = DistillKL(T=1)
if config["resume_path"] is not None:
self.epoch = int(
re.search(r"(?<=epoch=)[0-9]+", config["resume_path"])[0]
)
self.criterion = NCELoss(temperature=config["NCE_temperature"])
self.sem_NCE = semantic_NCELoss(temperature=config["NCE_temperature"])
self.working_dir = os.path.join(config["working_dir"], config["datetime"])
if os.environ.get("LOCAL_RANK", 0) == 0:
os.makedirs(self.working_dir, exist_ok=True)
self.text_embeddings_path = config['text_embeddings_path']
text_categories = config['text_categories']
if self.text_embeddings_path is None:
self.text_embeddings = nn.Parameter(torch.zeros(text_categories, 512))
nn.init.normal_(self.text_embeddings, mean=0.0, std=0.01)
else:
self.register_buffer('text_embeddings', torch.randn(text_categories, 512))
loaded = torch.load(self.text_embeddings_path, map_location='cuda')
self.text_embeddings[:, :] = loaded[:, :]
self.saved = False
self.max_size = 8
def get_in_field(self, coords, feats):
in_field = ME.TensorField(coordinates=coords.float(), features=feats.int(),
# coordinate_map_key=A.coordiante_map_key, coordinate_manager=A.coordinate_manager,
quantization_mode=ME.SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
minkowski_algorithm=ME.MinkowskiAlgorithm.SPEED_OPTIMIZED,
# minkowski_algorithm=ME.MinkowskiAlgorithm.MEMORY_EFFICIENT,
# device=self.config.device,
).float()
return in_field
def configure_optimizers(self):
optimizer = optim.SGD(
list(self.model_points.parameters()) + list(self.model_images.parameters()) + list(self.model_fusion.parameters()),
lr=self._config["lr"],
momentum=self._config["sgd_momentum"],
dampening=self._config["sgd_dampening"],
weight_decay=self._config["weight_decay"],
)
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, self.num_epochs)
return [optimizer], [scheduler]
def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx):
optimizer.zero_grad(set_to_none=True)
def training_step(self, batch, batch_idx):
self.model_points.train()
sinput_C = batch["sinput_C"]
sinput_F = batch["sinput_F"]
if self._config['dataset'] == "nuscenes":
sweepIds = batch["sweepIds"]
if self._config['max_sweeps'] > 1:
for sweepid in range(1, self._config['max_sweeps']):
sweepInd = sweepIds == sweepid
sinput_C[sweepInd, -1] = sinput_C[sweepInd, -1] + self._config['batch_size'] * sweepid
if self._config['dataset'] == "scannet":
sparse_input = ME.SparseTensor(sinput_F.float(), coordinates=sinput_C.int())
else:
sparse_input = spvcnn_SparseTensor(sinput_F, sinput_C)
output_points = self.model_points(sparse_input)
output_images = self.model_images(batch["input_I"].float())
del batch["sinput_F"]
del batch["sinput_C"]
del batch["input_I"]
del sparse_input
# each loss is applied independently on each GPU
losses = [
getattr(self, loss)(batch, output_points, output_images)
for loss in self.losses
]
loss = torch.mean(torch.stack(losses))
torch.cuda.empty_cache()
self.log(
"train_loss", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True, batch_size=self.batch_size
)
if not self.saved:
if self.epoch == 10:
self.save()
self.saved = True
self.train_losses.append(loss.detach().cpu())
return loss
def scannet_loss(self, batch, output_points, output_images):
# output_images.shape: torch.Size([96, 64, 224, 416])
# output_points.shape: torch.Size([225648, 64])
# pairing_points.shape: torch.Size([214155])
# pairing_images.shape: torch.Size([214155, 3])
pairing_points = batch["pairing_points"]
pairing_images = batch["pairing_images"]
image_feats, image_pred = output_images
point_feats_a, point_feats_b = output_points
# semantic branch: per-point logits over the CLIP text categories via a 1x1 convolution with the text embeddings
point_logists = F.conv1d(point_feats_a.unsqueeze(-1), self.text_embeddings[:, :, None]).squeeze()
k_logists = point_logists[pairing_points]
m_pred = tuple(pairing_images.T.long())
q_pred = image_pred[m_pred]
# switchable training strategy
if self.epoch >= 10:
rd = random.randint(1, 10)
if rd > 5: q_pred = k_logists.argmax(dim=1)
loss_semantic = self.CE(k_logists, q_pred)
point_feats_b = point_feats_b[pairing_points]
image_feats = image_feats.permute(0, 2, 3, 1)[m_pred]
loss_spatial = torch.mean(1 - F.cosine_similarity(image_feats, point_feats_b, dim=1))
return loss_semantic + loss_spatial
def feature_packaging(self, image_global_allpoints, point_global_allpoints, inverse_indexes_merged, image_pred):
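# Groups the per-point image/point features (plus the image pseudo-label) by their
# merged-voxel index: each voxel receives a fixed-size package of up to self.max_size
# entries, and the unused slots are left as zeros.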
uni_feature = torch.cat((image_global_allpoints, point_global_allpoints, image_pred.unsqueeze(-1)), dim=1)
max_inverse_indexes = inverse_indexes_merged.max()
feature_packages = torch.zeros((max_inverse_indexes + 1) * self.max_size, uni_feature.shape[1]).cuda()
sorted_inverse_indexes, sorted_indices = torch.sort(inverse_indexes_merged)
uni_feature = uni_feature[sorted_indices]
_, counts = torch.unique(sorted_inverse_indexes, return_counts=True)
offset = nb_pack(counts.detach().cpu().numpy())
offset = torch.from_numpy(np.concatenate(offset, axis=0)).cuda()
valid_index = offset < self.max_size
offset = offset[valid_index]
sorted_inverse_indexes = sorted_inverse_indexes[valid_index]
uni_feature = uni_feature[valid_index]
index = sorted_inverse_indexes * self.max_size + offset
feature_packages[index] = uni_feature
feature_packages = feature_packages.view((max_inverse_indexes + 1), self.max_size, uni_feature.shape[1])
return feature_packages
def loss_nuscenes(self, batch, output_points, output_images):
# output_images.shape: torch.Size([96, 64, 224, 416])
# output_points.shape: torch.Size([225648, 64])
# pairing_points.shape: torch.Size([214155])
# pairing_images.shape: torch.Size([214155, 3])
pairing_points = batch["pairing_points"]
pairing_images = batch["pairing_images"]
inverse_indexes_group = batch["inverse_indexes_group"]
inverse_indexes_merged = batch['inverse_indexes_merged']
image_global, image_pred = output_images
point_local, point_global = output_points
point_local = point_local[inverse_indexes_group]
point_local_allpoints = point_local[pairing_points]
point_global = point_global[inverse_indexes_group]
point_global_allpoints = point_global[pairing_points]
inverse_indexes_merged = inverse_indexes_merged[pairing_points]
m_pred = tuple(pairing_images.T.long())
image_global_allpoints = image_global.permute(0, 2, 3, 1)[m_pred]
image_pred = image_pred[m_pred]
feature_packages = self.feature_packaging(image_global_allpoints, point_local_allpoints, inverse_indexes_merged, image_pred)
super_nodes_points, inner_products, pixel_pred = self.model_fusion(feature_packages)
super_nodes_logit = F.conv1d(point_global_allpoints.unsqueeze(-1), self.text_embeddings[:, :, None]).squeeze()
loss_semantic = 0
# Switchable Self-training Strategy
if self.epoch > 10:
index_set = set(np.array(list(range(inverse_indexes_group.shape[0]))))
pairing_set = set(pairing_points.detach().long().cpu().numpy())
index_set_rest = list(index_set - pairing_set)
point_global_rest = point_global[index_set_rest]
point_global_logits = F.conv1d(point_global_rest.unsqueeze(-1), self.text_embeddings[:, :, None]).squeeze()
point_global_pred = point_global_logits.argmax(dim=1)
loss_semantic += self.CE(point_global_logits, point_global_pred)
rd = random.randint(1, 10)
if rd > 5: image_pred = super_nodes_logit.argmax(dim=1)
loss_semantic = self.CE(super_nodes_logit, image_pred)
loss_spatial_temporal = torch.mean(1 - inner_products)
return loss_semantic + loss_spatial_temporal
def loss(self, batch, output_points, output_images):
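# Contrastive (InfoNCE) loss on self.num_matches randomly sampled point/pixel pairs.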
pairing_points = batch["pairing_points"]
pairing_images = batch["pairing_images"]
idx = np.random.choice(pairing_points.shape[0], self.num_matches, replace=False)
k = output_points[pairing_points[idx]]
m = tuple(pairing_images[idx].T.long())
q = output_images.permute(0, 2, 3, 1)[m]
return self.criterion(k, q)
def loss_superpixels_average(self, batch, output_points, output_images):
# compute a superpoint-to-superpixel loss using superpixels
torch.cuda.empty_cache() # This method is extremely memory intensive
superpixels = batch["superpixels"]
pairing_images = batch["pairing_images"]
pairing_points = batch["pairing_points"]
superpixels = (
torch.arange(
0,
output_images.shape[0] * self.superpixel_size,
self.superpixel_size,
device=self.device,
)[:, None, None] + superpixels
)
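# offset the superpixel labels per image so that the ids are unique across the whole batch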
m = tuple(pairing_images.cpu().T.long())
superpixels_I = superpixels.flatten()
idx_P = torch.arange(pairing_points.shape[0], device=superpixels.device)
total_pixels = superpixels_I.shape[0]
idx_I = torch.arange(total_pixels, device=superpixels.device)
with torch.no_grad():
one_hot_P = torch.sparse_coo_tensor(
torch.stack((
superpixels[m], idx_P
), dim=0),
torch.ones(pairing_points.shape[0], device=superpixels.device),
(superpixels.shape[0] * self.superpixel_size, pairing_points.shape[0])
)
one_hot_I = torch.sparse_coo_tensor(
torch.stack((
superpixels_I, idx_I
), dim=0),
torch.ones(total_pixels, device=superpixels.device),
(superpixels.shape[0] * self.superpixel_size, total_pixels)
)
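# The sparse one-hot matrices act as pooling operators: the matmuls below average the
# paired point features per superpoint (k) and the pixel features per superpixel (q).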
k = one_hot_P @ output_points[pairing_points]
k = k / (torch.sparse.sum(one_hot_P, 1).to_dense()[:, None] + 1e-6)
q = one_hot_I @ output_images.permute(0, 2, 3, 1).flatten(0, 2)
q = q / (torch.sparse.sum(one_hot_I, 1).to_dense()[:, None] + 1e-6)
mask = torch.where(k[:, 0] != 0)
k = k[mask]
q = q[mask]
return self.criterion(k, q)
def training_epoch_end(self, outputs):
self.epoch += 1
if self.epoch == self.num_epochs:
self.save()
return super().training_epoch_end(outputs)
def validation_step(self, batch, batch_idx):
sinput_C = batch["sinput_C"]
sinput_F = batch["sinput_F"]
if self._config['dataset'] == "scannet":
sparse_input = ME.SparseTensor(sinput_F.float(), coordinates=sinput_C.int())
else:
sparse_input = spvcnn_SparseTensor(sinput_F, sinput_C)
output_points = self.model_points(sparse_input)
self.model_images.eval()
output_images = self.model_images(batch["input_I"])
losses = [
getattr(self, loss)(batch, output_points, output_images)
for loss in self.losses
]
loss = torch.mean(torch.stack(losses))
self.val_losses.append(loss.detach().cpu())
self.log(
"val_loss", loss, on_epoch=True, prog_bar=True, logger=True, sync_dist=True, batch_size=self.batch_size
)
return loss
@rank_zero_only
def save(self):
path = os.path.join(self.working_dir, "model.pt")
torch.save(
{
"model_points": self.model_points.state_dict(),
"model_images": self.model_images.state_dict(),
"model_fusion": self.model_fusion.state_dict(),
"epoch": self.epoch,
"config": self._config,
},
path,
)
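# Typical wiring (sketch): the three models come from pretrain.model_builder.make_model(config)
# and are trained with a pytorch_lightning Trainer, e.g.
# module = LightningPretrain(model_points, model_images, model_fusion, config)
# trainer = pl.Trainer(max_epochs=config["num_epochs"])
# trainer.fit(module, datamodule=PretrainDataModule(config))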
| 14,055 | 39.507205 | 132 |
py
|
CLIP2Scene
|
CLIP2Scene-main/pretrain/dataloader_nuscenes.py
|
import os
import copy
import torch
import numpy as np
from PIL import Image
# import MinkowskiEngine as ME
from pyquaternion import Quaternion
from torch.utils.data import Dataset
from nuscenes.nuscenes import NuScenes
from nuscenes.utils.geometry_utils import view_points
from nuscenes.utils.splits import create_splits_scenes
from nuscenes.utils.data_classes import LidarPointCloud
from torchsparse.utils.quantize import sparse_quantize
from abc import ABC, abstractmethod
import json
import cv2
import pickle
CUSTOM_SPLIT = [
"scene-0008", "scene-0009", "scene-0019", "scene-0029", "scene-0032", "scene-0042",
"scene-0045", "scene-0049", "scene-0052", "scene-0054", "scene-0056", "scene-0066",
"scene-0067", "scene-0073", "scene-0131", "scene-0152", "scene-0166", "scene-0168",
"scene-0183", "scene-0190", "scene-0194", "scene-0208", "scene-0210", "scene-0211",
"scene-0241", "scene-0243", "scene-0248", "scene-0259", "scene-0260", "scene-0261",
"scene-0287", "scene-0292", "scene-0297", "scene-0305", "scene-0306", "scene-0350",
"scene-0352", "scene-0358", "scene-0361", "scene-0365", "scene-0368", "scene-0377",
"scene-0388", "scene-0391", "scene-0395", "scene-0413", "scene-0427", "scene-0428",
"scene-0438", "scene-0444", "scene-0452", "scene-0453", "scene-0459", "scene-0463",
"scene-0464", "scene-0475", "scene-0513", "scene-0533", "scene-0544", "scene-0575",
"scene-0587", "scene-0589", "scene-0642", "scene-0652", "scene-0658", "scene-0669",
"scene-0678", "scene-0687", "scene-0701", "scene-0703", "scene-0706", "scene-0710",
"scene-0715", "scene-0726", "scene-0735", "scene-0740", "scene-0758", "scene-0786",
"scene-0790", "scene-0804", "scene-0806", "scene-0847", "scene-0856", "scene-0868",
"scene-0882", "scene-0897", "scene-0899", "scene-0976", "scene-0996", "scene-1012",
"scene-1015", "scene-1016", "scene-1018", "scene-1020", "scene-1024", "scene-1044",
"scene-1058", "scene-1094", "scene-1098", "scene-1107",
]
def minkunet_collate_pair_fn(list_data):
"""
Collate function adapted for creating batches with MinkowskiEngine.
"""
(
coords,
feats,
images,
pairing_points,
pairing_images,
inverse_indexes,
inverse_indexes_merged,
sweepIds_group,
sweep_pairing_group,
) = list(zip(*list_data))
batch_n_points, batch_n_pairings = [], []
offset = 0
offset_inverse_indexes = 0
for batch_id in range(len(coords)):
# Write the batch id into the last coordinate column
coords[batch_id][:, -1] = batch_id
pairing_points[batch_id][:] += offset_inverse_indexes
pairing_images[batch_id][:, 0] += batch_id * images[0].shape[0]
inverse_indexes[batch_id][:] += offset
inverse_indexes_merged[batch_id][:] += offset
batch_n_points.append(coords[batch_id].shape[0])
batch_n_pairings.append(pairing_points[batch_id].shape[0])
offset += coords[batch_id].shape[0]
offset_inverse_indexes += inverse_indexes[batch_id].shape[0]
coords_batch = torch.cat(coords, 0).int()
pairing_points = torch.cat(pairing_points, 0)
pairing_images = torch.cat(pairing_images, 0)
feats_batch = torch.cat(feats, 0).float()
images_batch = torch.cat(images, 0).float()
sweepIds_group = torch.cat(sweepIds_group, 0)
inverse_indexes_merged = torch.cat(inverse_indexes_merged, 0)
inverse_indexes_group = torch.cat(inverse_indexes, 0)
return {
"sinput_C": coords_batch,
"sinput_F": feats_batch,
"input_I": images_batch,
"pairing_points": pairing_points,
"pairing_images": pairing_images,
"batch_n_pairings": batch_n_pairings,
"inverse_indexes_group": inverse_indexes_group,
"inverse_indexes_merged": inverse_indexes_merged,
"sweepIds": sweepIds_group,
"sweep_pairing_group": sweep_pairing_group,
}
class NuScenesMatchDataset(Dataset):
"""
Dataset matching a 3D point cloud and an image using projection.
"""
def __init__(
self,
phase,
config,
shuffle=False,
cloud_transforms=None,
mixed_transforms=None,
**kwargs,
):
self.phase = phase
self.shuffle = shuffle
self.cloud_transforms = cloud_transforms
self.mixed_transforms = mixed_transforms
self.voxel_size = config["voxel_size"]
self.cylinder = config["cylindrical_coordinates"]
self.superpixels_type = config["superpixels_type"]
self.bilinear_decoder = config["decoder"] == "bilinear"
self.config = config
self.dataroot = config['dataRoot_nuscenes']
if "cached_nuscenes" in kwargs:
self.nusc = kwargs["cached_nuscenes"]
else:
self.nusc = NuScenes(
version="v1.0-trainval", dataroot=self.dataroot, verbose=False
)
self.list_keyframes = []
# a skip ratio can be used to reduce the dataset size and accelerate experiments
try:
skip_ratio = config["dataset_skip_step"]
except KeyError:
skip_ratio = 1
skip_counter = 0
if phase in ("train", "val", "test"):
phase_scenes = create_splits_scenes()[phase]
elif phase == "parametrizing":
phase_scenes = list(
set(create_splits_scenes()["train"]) - set(CUSTOM_SPLIT)
)
elif phase == "verifying":
phase_scenes = CUSTOM_SPLIT
# create a list of camera & lidar scans
for scene_idx in range(len(self.nusc.scene)):
scene = self.nusc.scene[scene_idx]
if scene["name"] in phase_scenes:
skip_counter += 1
if skip_counter % skip_ratio == 0:
self.create_list_of_scans(scene)
with open('/nvme/konglingdong/youquan/nuscenes_infos_10sweeps_train.pkl', 'rb') as f:
self.sweeps_infos = pickle.load(f)
tem = {}
for info in self.sweeps_infos:
tem[info['lidar_path']] = {'sweeps': info['sweeps']}
self.sweeps_infos = tem
self.max_sweeps = self.config['max_sweeps']
print(phase)
print(len(phase_scenes))
def create_list_of_scans(self, scene):
# Get the first keyframe in the scene
current_sample_token = scene["first_sample_token"]
# print("current_sample_token", current_sample_token)
# Loop to get all successive keyframes
list_data = []
while current_sample_token != "":
current_sample = self.nusc.get("sample", current_sample_token)
list_data.append(current_sample["data"])
current_sample_token = current_sample["next"]
# Add new scans in the list
self.list_keyframes.extend(list_data)
def get_sweep(self, sweep_info):
def remove_ego_points(points, center_radius=1.0):
mask = ~((np.abs(points[:, 0]) < center_radius) & (np.abs(points[:, 1]) < center_radius))
return points[mask]
lidar_name = sweep_info['lidar_path']
lidar_path = os.path.join(self.dataroot, lidar_name)
pc_original = LidarPointCloud.from_file(lidar_path)
points_sweep = pc_original.points.T[:, :4]
points_sweep = remove_ego_points(points_sweep).T
if sweep_info['transform_matrix'] is not None:
num_points = points_sweep.shape[1]
points_sweep[:3, :] = sweep_info['transform_matrix'].dot(
np.vstack((points_sweep[:3, :], np.ones(num_points))))[:3, :]
cur_times = sweep_info['time_lag'] * np.ones((1, points_sweep.shape[1]))
return points_sweep.T, cur_times.T
def get_lidar_with_sweeps(self, lidar_name, max_sweeps=1):
info = self.sweeps_infos[lidar_name]
lidar_path = os.path.join(self.nusc.dataroot, lidar_name)
pc_original = LidarPointCloud.from_file(lidar_path)
points = pc_original.points.T[:, :4]
name_list = [lidar_name]
sweep_points_list = [points]
for k in np.random.choice(len(info['sweeps']), max_sweeps - 1, replace=False):
points_sweep, times_sweep = self.get_sweep(info['sweeps'][k])
sweep_points_list.append(points_sweep)
name_list.append(info['sweeps'][k]['lidar_path'])
points = np.concatenate(sweep_points_list, axis=0)
return sweep_points_list, points
def map_pointcloud_to_image(self, point_merged, data, lidar_name, min_dist: float = 1.0, multi_sweeps=True):
"""
Given a lidar token and camera sample_data token, load pointcloud and map it to
the image plane. Code adapted from nuscenes-devkit
https://github.com/nutonomy/nuscenes-devkit.
:param min_dist: Distance from the camera below which points are discarded.
"""
pointsensor = self.nusc.get("sample_data", data["LIDAR_TOP"])
pc_original = LidarPointCloud.from_points(point_merged)
pc_ref = pc_original.points
images = []
pairing_points = np.empty(0, dtype=np.int64)
pairing_images = np.empty((0, 3), dtype=np.int64)
camera_list = [
"CAM_FRONT",
"CAM_FRONT_RIGHT",
"CAM_BACK_RIGHT",
"CAM_BACK",
"CAM_BACK_LEFT",
"CAM_FRONT_LEFT",
]
if self.shuffle:
np.random.shuffle(camera_list)
for i, camera_name in enumerate(camera_list):
pc = copy.deepcopy(pc_original)
cam = self.nusc.get("sample_data", data[camera_name])
im = np.array(Image.open(os.path.join(self.nusc.dataroot, cam["filename"])))
# Points live in the point sensor frame. So they need to be transformed via
# global to the image plane.
# First step: transform the pointcloud to the ego vehicle frame for the
# timestamp of the sweep.
cs_record = self.nusc.get(
"calibrated_sensor", pointsensor["calibrated_sensor_token"]
)
pc.rotate(Quaternion(cs_record["rotation"]).rotation_matrix)
pc.translate(np.array(cs_record["translation"]))
# Second step: transform from ego to the global frame.
poserecord = self.nusc.get("ego_pose", pointsensor["ego_pose_token"])
pc.rotate(Quaternion(poserecord["rotation"]).rotation_matrix)
pc.translate(np.array(poserecord["translation"]))
# Third step: transform from global into the ego vehicle frame for the
# timestamp of the image.
poserecord = self.nusc.get("ego_pose", cam["ego_pose_token"])
pc.translate(-np.array(poserecord["translation"]))
pc.rotate(Quaternion(poserecord["rotation"]).rotation_matrix.T)
# Fourth step: transform from ego into the camera.
cs_record = self.nusc.get(
"calibrated_sensor", cam["calibrated_sensor_token"]
)
pc.translate(-np.array(cs_record["translation"]))
pc.rotate(Quaternion(cs_record["rotation"]).rotation_matrix.T)
# Fifth step: actually take a "picture" of the point cloud.
# Grab the depths (camera frame z axis points away from the camera).
depths = pc.points[2, :]
# Take the actual picture
# (matrix multiplication with camera-matrix + renormalization).
points = view_points(
pc.points[:3, :],
np.array(cs_record["camera_intrinsic"]),
normalize=True,
)
# Remove points that are either outside or behind the camera.
# Also make sure points are at least 1m in front of the camera to avoid
# seeing the lidar points on the camera
# casing for non-keyframes which are slightly out of sync.
points = points[:2].T
mask = np.ones(depths.shape[0], dtype=bool)
mask = np.logical_and(mask, depths > min_dist)
mask = np.logical_and(mask, points[:, 0] > 0)
mask = np.logical_and(mask, points[:, 0] < im.shape[1] - 1)
mask = np.logical_and(mask, points[:, 1] > 0)
mask = np.logical_and(mask, points[:, 1] < im.shape[0] - 1)
matching_points = np.where(mask)[0]
matching_pixels = np.round(
np.flip(points[matching_points], axis=1)
).astype(np.int64)
images.append(im / 255)
pairing_points = np.concatenate((pairing_points, matching_points))
pairing_images = np.concatenate(
(
pairing_images,
np.concatenate(
(
np.ones((matching_pixels.shape[0], 1), dtype=np.int64) * i,
matching_pixels,
),
axis=1,
),
)
)
return pc_ref.T, images, pairing_points, pairing_images
def __len__(self):
return len(self.list_keyframes)
def voxelizaton(self, pc):
if self.cylinder:
# Transform to cylinder coordinate and scale for voxel size
x, y, z = pc.T
rho = torch.sqrt(x ** 2 + y ** 2) / self.voxel_size
phi = torch.atan2(y, x) * 180 / np.pi # corresponds to a split each 1°
z = z / self.voxel_size
coords_aug = torch.cat((rho[:, None], phi[:, None], z[:, None]), 1)
else:
coords_aug = pc / self.voxel_size
discrete_coords, indexes, inverse_indexes = sparse_quantize(
coords_aug.contiguous().numpy(), return_index=True, return_inverse=True
)
discrete_coords, indexes, inverse_indexes = torch.from_numpy(discrete_coords), torch.from_numpy(indexes), torch.from_numpy(inverse_indexes)
return discrete_coords, indexes, inverse_indexes
def __getitem__(self, idx):
data = self.list_keyframes[idx]
pointsensor = self.nusc.get("sample_data", data["LIDAR_TOP"])
lidar_name = pointsensor["filename"]
sweep_points_list, point_merged = self.get_lidar_with_sweeps(lidar_name, max_sweeps=self.max_sweeps)
point_merged = torch.from_numpy(point_merged)
pc = point_merged[:, :3]
"""
# merged point cloud
"""
discrete_coords_merged, indexes_merged, inverse_indexes_merged = self.voxelizaton(pc)
"""
# sweep point cloud
"""
discrete_coords_group = []
inverse_indexes_group = []
unique_feats_group = []
sweepIds_group = []
pairing_points_group = []
images_group = []
pairing_images_group = []
sweep_pairing_group = []
t = 0
offset_points = 0
offset_inverse_indexes = 0
for sweep_id, sweep_points in enumerate(sweep_points_list):
(
pc,
images,
pairing_points,
pairing_images,
) = self.map_pointcloud_to_image(sweep_points, data, lidar_name, multi_sweeps=False)
intensity = torch.tensor(sweep_points[:, 3:])
pc = torch.tensor(sweep_points[:, :3])
images = torch.tensor(np.array(images, dtype=np.float32).transpose(0, 3, 1, 2))
if self.cloud_transforms:
pc = self.cloud_transforms(pc)
if self.mixed_transforms:
(
pc,
intensity,
images,
pairing_points,
pairing_images,
) = self.mixed_transforms(
pc, intensity, images, pairing_points, pairing_images
)
discrete_coords, indexes, inverse_indexes = self.voxelizaton(pc)
pairing_points_group.append(torch.from_numpy(pairing_points[:]) + offset_inverse_indexes)
pairing_images[:, 0] += sweep_id * 6
pairing_images_group.append(torch.from_numpy(pairing_images))
inverse_indexes_group.append(inverse_indexes[:] + offset_points)
discrete_coords_group.append(discrete_coords)
unique_feats_group.append(intensity[indexes])
images_group.append(images)
sweepIds_group.append(t * torch.ones(discrete_coords.shape[0]))
sweep_pairing_group.append(t * torch.ones(pairing_images.shape[0]))
offset_points += discrete_coords.shape[0]
offset_inverse_indexes += inverse_indexes.shape[0]
t += 1
discrete_coords_group = torch.cat(discrete_coords_group, dim=0)
inverse_indexes_group = torch.cat(inverse_indexes_group, dim=0)
pairing_images_group = torch.cat(pairing_images_group, dim=0)
unique_feats_group = torch.cat(unique_feats_group, dim=0)
sweepIds_group = torch.cat(sweepIds_group, dim=0)
sweep_pairing_group = torch.cat(sweep_pairing_group, dim=0)
pairing_points_group = torch.cat(pairing_points_group, dim=0)
images_group = torch.cat(images_group, dim=0)
assert pairing_points_group.shape[0] == pairing_images_group.shape[0]
assert pairing_points_group.shape[0] == sweep_pairing_group.shape[0]
assert discrete_coords_group.shape[0] == sweepIds_group.shape[0]
assert inverse_indexes_group.shape[0] == inverse_indexes_merged.shape[0]
discrete_coords_group = torch.cat(
(
discrete_coords_group,
torch.zeros(discrete_coords_group.shape[0], 1, dtype=torch.int32),
),
1,
)
return (
discrete_coords_group,
unique_feats_group,
images_group,
pairing_points_group,
pairing_images_group,
inverse_indexes_group,
inverse_indexes_merged,
sweepIds_group,
sweep_pairing_group,
)
| 18,090 | 39.113082 | 147 |
py
|
CLIP2Scene
|
CLIP2Scene-main/pretrain/model_builder.py
|
from model import (
SPVCNN,
MinkUNet,
# VoxelNet,
DilationFeatureExtractor,
PPKTFeatureExtractor,
Preprocessing,
DinoVitFeatureExtractor,
fusionNet,
maskClipFeatureExtractor,
)
def forgiving_state_restore(net, loaded_dict):
"""
Handle partial loading when some tensors don't match up in size,
e.g. when reusing models that were trained with a different
number of classes.
"""
loaded_dict = {
k.replace("module.", ""): v for k, v in loaded_dict.items()
}
net_state_dict = net.state_dict()
new_loaded_dict = {}
for k in net_state_dict:
new_k = k
if (
new_k in loaded_dict and net_state_dict[k].size() == loaded_dict[new_k].size()
):
new_loaded_dict[k] = loaded_dict[new_k]
else:
print("Skipped loading parameter {}".format(k))
net_state_dict.update(new_loaded_dict)
net.load_state_dict(net_state_dict)
return net
def make_model(config):
"""
Build the point, image, and fusion models according to the config.
"""
model_fusion = fusionNet(config)
# if config["model_points"] == "voxelnet":
# model_points = VoxelNet(4, config["model_n_out"], config)
# else:
# model_points = SPVCNN(1, config["model_n_out"], config)
if config['dataset'] == "scannet":
model_points = MinkUNet(3, config["model_n_out"], config)
else:
model_points = SPVCNN(1, config["model_n_out"], config)
if config["images_encoder"].find("vit_") != -1:
model_images = DinoVitFeatureExtractor(config, preprocessing=Preprocessing())
elif config["images_encoder"] == "maskclip":
# model = build_segmentor(cfg.model, test_cfg=cfg.get('test_cfg'))
model_images = maskClipFeatureExtractor(config, preprocessing=Preprocessing())
elif config["decoder"] == "dilation":
model_images = DilationFeatureExtractor(config, preprocessing=Preprocessing())
elif config["decoder"] == "ppkt":
model_images = PPKTFeatureExtractor(config, preprocessing=Preprocessing())
else:
# model with a decoder
raise Exception(f"Model not found: {config['decoder']}")
return model_points, model_images, model_fusion
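# Example (sketch): model_points, model_images, model_fusion = make_model(config),
# where config must provide at least "dataset", "model_n_out", "images_encoder" and "decoder".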
| 2,258 | 34.296875 | 90 |
py
|
CLIP2Scene
|
CLIP2Scene-main/pretrain/dataloader_nuscenes_spconv.py
|
import os
import copy
import torch
import numpy as np
from PIL import Image
from pyquaternion import Quaternion
from torch.utils.data import Dataset
from nuscenes.nuscenes import NuScenes
from nuscenes.utils.geometry_utils import view_points
from nuscenes.utils.splits import create_splits_scenes
from nuscenes.utils.data_classes import LidarPointCloud
from spconv.utils import VoxelGeneratorV2 as VoxelGenerator
CUSTOM_SPLIT = [
"scene-0008", "scene-0009", "scene-0019", "scene-0029", "scene-0032", "scene-0042",
"scene-0045", "scene-0049", "scene-0052", "scene-0054", "scene-0056", "scene-0066",
"scene-0067", "scene-0073", "scene-0131", "scene-0152", "scene-0166", "scene-0168",
"scene-0183", "scene-0190", "scene-0194", "scene-0208", "scene-0210", "scene-0211",
"scene-0241", "scene-0243", "scene-0248", "scene-0259", "scene-0260", "scene-0261",
"scene-0287", "scene-0292", "scene-0297", "scene-0305", "scene-0306", "scene-0350",
"scene-0352", "scene-0358", "scene-0361", "scene-0365", "scene-0368", "scene-0377",
"scene-0388", "scene-0391", "scene-0395", "scene-0413", "scene-0427", "scene-0428",
"scene-0438", "scene-0444", "scene-0452", "scene-0453", "scene-0459", "scene-0463",
"scene-0464", "scene-0475", "scene-0513", "scene-0533", "scene-0544", "scene-0575",
"scene-0587", "scene-0589", "scene-0642", "scene-0652", "scene-0658", "scene-0669",
"scene-0678", "scene-0687", "scene-0701", "scene-0703", "scene-0706", "scene-0710",
"scene-0715", "scene-0726", "scene-0735", "scene-0740", "scene-0758", "scene-0786",
"scene-0790", "scene-0804", "scene-0806", "scene-0847", "scene-0856", "scene-0868",
"scene-0882", "scene-0897", "scene-0899", "scene-0976", "scene-0996", "scene-1012",
"scene-1015", "scene-1016", "scene-1018", "scene-1020", "scene-1024", "scene-1044",
"scene-1058", "scene-1094", "scene-1098", "scene-1107",
]
def mean_vfe(voxel_features, voxel_num_points):
# voxel_features, voxel_num_points = batch_dict['voxels'], batch_dict['voxel_num_points']
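# Simple mean voxel-feature encoder: average the (zero-padded) points inside each voxel,
# dividing by the true number of points rather than by the padded size.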
points_mean = voxel_features[:, :, :].sum(dim=1, keepdim=False)
normalizer = torch.clamp_min(voxel_num_points.view(-1, 1), min=1.0).type_as(voxel_features)
points_mean = points_mean / normalizer
voxel_features = points_mean.contiguous()
return voxel_features
def spconv_collate_pair_fn(list_data):
"""
Collate function adapted for creating batches of spconv voxels.
"""
(
pc,
coords,
feats,
images,
pairing_points,
pairing_images,
num_points,
superpixels,
) = list(zip(*list_data))
batch_n_points, batch_n_pairings = [], []
pc_batch = []
offset = 0
for batch_id in range(len(pc)):
pc_batch.append(torch.cat((torch.ones((pc[batch_id].shape[0], 1)) * batch_id, pc[batch_id]), 1))
pairing_points[batch_id][:] += offset
offset += pc[batch_id].shape[0]
offset = 0
for batch_id in range(len(coords)):
# Move batchids to the beginning
coords[batch_id][:, 0] = batch_id
pairing_images[batch_id][:, 0] += batch_id * images[0].shape[0]
batch_n_points.append(coords[batch_id].shape[0])
batch_n_pairings.append(pairing_points[batch_id].shape[0])
offset += coords[batch_id].shape[0]
# Concatenate all lists
coords_batch = torch.cat(coords, 0).int()
pc_batch = torch.cat(pc_batch, 0)
pairing_points = torch.tensor(np.concatenate(pairing_points))
pairing_images = torch.tensor(np.concatenate(pairing_images))
feats_batch = torch.cat(feats, 0).float()
images_batch = torch.cat(images, 0).float()
superpixels_batch = torch.tensor(np.concatenate(superpixels))
num_points = torch.cat(num_points, 0)
feats_batch = mean_vfe(feats_batch, num_points)
return {
"pc": pc_batch,
"coordinates": coords_batch,
"voxels": feats_batch,
"input_I": images_batch,
"pairing_points": pairing_points,
"pairing_images": pairing_images,
"batch_n_pairings": batch_n_pairings,
"num_points": num_points,
"superpixels": superpixels_batch,
}
class NuScenesMatchDatasetSpconv(Dataset):
"""
Dataset matching a 3D point cloud and an image using projection.
"""
def __init__(
self,
phase,
config,
shuffle=False,
cloud_transforms=None,
mixed_transforms=None,
**kwargs,
):
self.phase = phase
self.shuffle = shuffle
self.cloud_transforms = cloud_transforms
self.mixed_transforms = mixed_transforms
if config["dataset"] == "nuscenes":
self.voxel_size = [0.1, 0.1, 0.2] # nuScenes
self.point_cloud_range = np.array([-51.2, -51.2, -5.0, 51.2, 51.2, 3.0], dtype=np.float32) # nuScenes
MAX_POINTS_PER_VOXEL = 10 # nuScenes
MAX_NUMBER_OF_VOXELS = 60000 # nuScenes
self._voxel_generator = VoxelGenerator(
voxel_size=self.voxel_size,
point_cloud_range=self.point_cloud_range,
max_num_points=MAX_POINTS_PER_VOXEL,
max_voxels=MAX_NUMBER_OF_VOXELS
)
else:
raise Exception("Dataset unknown")
self.superpixels_type = config["superpixels_type"]
self.bilinear_decoder = config["decoder"] == "bilinear"
self.num_point_features = 4
if "cached_nuscenes" in kwargs:
self.nusc = kwargs["cached_nuscenes"]
else:
self.nusc = NuScenes(
version="v1.0-trainval", dataroot="datasets/nuscenes", verbose=False
)
self.list_keyframes = []
# a skip ratio can be used to reduce the dataset size and accelerate experiments
try:
skip_ratio = config["dataset_skip_step"]
except KeyError:
skip_ratio = 1
skip_counter = 0
if phase in ("train", "val", "test"):
phase_scenes = create_splits_scenes()[phase]
elif phase == "parametrizing":
phase_scenes = list(
set(create_splits_scenes()["train"]) - set(CUSTOM_SPLIT)
)
elif phase == "verifying":
phase_scenes = CUSTOM_SPLIT
# create a list of camera & lidar scans
for scene_idx in range(len(self.nusc.scene)):
scene = self.nusc.scene[scene_idx]
if scene["name"] in phase_scenes:
skip_counter += 1
if skip_counter % skip_ratio == 0:
self.create_list_of_scans(scene)
def create_list_of_scans(self, scene):
# Get the first keyframe in the scene
current_sample_token = scene["first_sample_token"]
# Loop to get all successive keyframes
list_data = []
while current_sample_token != "":
current_sample = self.nusc.get("sample", current_sample_token)
list_data.append(current_sample["data"])
current_sample_token = current_sample["next"]
# Add new scans in the list
self.list_keyframes.extend(list_data)
def map_pointcloud_to_image(self, data, min_dist: float = 1.0):
"""
Given a lidar token and camera sample_data token, load pointcloud and map it to
the image plane. Code adapted from nuscenes-devkit
https://github.com/nutonomy/nuscenes-devkit.
:param min_dist: Distance from the camera below which points are discarded.
"""
pointsensor = self.nusc.get("sample_data", data["LIDAR_TOP"])
pcl_path = os.path.join(self.nusc.dataroot, pointsensor["filename"])
pc_original = LidarPointCloud.from_file(pcl_path)
pc = pc_original.points
dist = pc[0] * pc[0] + pc[1] * pc[1]
mask = (dist <= 2621.44) & \
(pc[2] >= self.point_cloud_range[2]) & \
(pc[2] <= self.point_cloud_range[5])
pc_original = LidarPointCloud(pc[:, mask])
pc_ref = pc_original.points
images = []
superpixels = []
pairing_points = np.empty(0, dtype=np.int64)
pairing_images = np.empty((0, 3), dtype=np.int64)
camera_list = [
"CAM_FRONT",
"CAM_FRONT_RIGHT",
"CAM_BACK_RIGHT",
"CAM_BACK",
"CAM_BACK_LEFT",
"CAM_FRONT_LEFT",
]
if self.shuffle:
np.random.shuffle(camera_list)
for i, camera_name in enumerate(camera_list):
pc = copy.deepcopy(pc_original)
cam = self.nusc.get("sample_data", data[camera_name])
im = np.array(Image.open(os.path.join(self.nusc.dataroot, cam["filename"])))
sp = Image.open(
f"superpixels/nuscenes/"
f"superpixels_{self.superpixels_type}/{cam['token']}.png"
)
superpixels.append(np.array(sp))
# Points live in the point sensor frame. So they need to be transformed via
# global to the image plane.
# First step: transform the pointcloud to the ego vehicle frame for the
# timestamp of the sweep.
cs_record = self.nusc.get(
"calibrated_sensor", pointsensor["calibrated_sensor_token"]
)
pc.rotate(Quaternion(cs_record["rotation"]).rotation_matrix)
pc.translate(np.array(cs_record["translation"]))
# Second step: transform from ego to the global frame.
poserecord = self.nusc.get("ego_pose", pointsensor["ego_pose_token"])
pc.rotate(Quaternion(poserecord["rotation"]).rotation_matrix)
pc.translate(np.array(poserecord["translation"]))
# Third step: transform from global into the ego vehicle frame for the
# timestamp of the image.
poserecord = self.nusc.get("ego_pose", cam["ego_pose_token"])
pc.translate(-np.array(poserecord["translation"]))
pc.rotate(Quaternion(poserecord["rotation"]).rotation_matrix.T)
# Fourth step: transform from ego into the camera.
cs_record = self.nusc.get(
"calibrated_sensor", cam["calibrated_sensor_token"]
)
pc.translate(-np.array(cs_record["translation"]))
pc.rotate(Quaternion(cs_record["rotation"]).rotation_matrix.T)
# Fifth step: actually take a "picture" of the point cloud.
# Grab the depths (camera frame z axis points away from the camera).
depths = pc.points[2, :]
# Take the actual picture
# (matrix multiplication with camera-matrix + renormalization).
points = view_points(
pc.points[:3, :],
np.array(cs_record["camera_intrinsic"]),
normalize=True,
)
# Remove points that are either outside or behind the camera.
# Also make sure points are at least 1m in front of the camera to avoid
# seeing the lidar points on the camera
# casing for non-keyframes which are slightly out of sync.
points = points[:2].T
mask = np.ones(depths.shape[0], dtype=bool)
mask = np.logical_and(mask, depths > min_dist)
mask = np.logical_and(mask, points[:, 0] > 0)
mask = np.logical_and(mask, points[:, 0] < im.shape[1] - 1)
mask = np.logical_and(mask, points[:, 1] > 0)
mask = np.logical_and(mask, points[:, 1] < im.shape[0] - 1)
matching_points = np.where(mask)[0]
matching_pixels = np.round(
np.flip(points[matching_points], axis=1)
).astype(np.int64)
images.append(im / 255)
pairing_points = np.concatenate((pairing_points, matching_points))
pairing_images = np.concatenate(
(
pairing_images,
np.concatenate(
(
np.ones((matching_pixels.shape[0], 1), dtype=np.int64) * i,
matching_pixels,
),
axis=1,
),
)
)
return pc_ref.T, images, pairing_points, pairing_images, np.stack(superpixels)
def __len__(self):
return len(self.list_keyframes)
def _voxelize(self, points):
voxel_output = self._voxel_generator.generate(points.numpy())
voxels, coordinates, num_points = \
voxel_output['voxels'], voxel_output['coordinates'], voxel_output['num_points_per_voxel']
return voxels, coordinates, num_points
def __getitem__(self, idx):
(
pc,
images,
pairing_points,
pairing_images,
superpixels,
) = self.map_pointcloud_to_image(self.list_keyframes[idx])
superpixels = torch.tensor(superpixels)
intensity = torch.tensor(pc[:, 3:])
pc = torch.tensor(pc[:, :3])
images = torch.tensor(np.array(images, dtype=np.float32).transpose(0, 3, 1, 2))
if self.cloud_transforms:
pc = self.cloud_transforms(pc)
if self.mixed_transforms:
(
pc,
intensity,
images,
pairing_points,
pairing_images,
superpixels,
) = self.mixed_transforms(
pc, intensity, images, pairing_points, pairing_images, superpixels
)
pc = torch.cat((pc, intensity), 1)
voxels, coordinates, num_points = self._voxelize(pc)
discrete_coords = torch.cat(
(
torch.zeros(coordinates.shape[0], 1, dtype=torch.int32),
torch.tensor(coordinates),
),
1,
)
voxels = torch.tensor(voxels)
num_points = torch.tensor(num_points)
return (
pc,
discrete_coords,
voxels,
images,
pairing_points,
pairing_images,
num_points,
superpixels,
)
| 14,192 | 38.756303 | 114 |
py
|
CLIP2Scene
|
CLIP2Scene-main/pretrain/lightning_trainer_spconv.py
|
import os
import re
import torch
import numpy as np
import torch.optim as optim
import pytorch_lightning as pl
from pretrain.criterion import NCELoss
from pytorch_lightning.utilities import rank_zero_only
def bilinear_interpolate_torch(im, x, y):
"""
Args:
im: (H, W, C) [y, x]
x: (N)
y: (N)
Returns:
"""
x0 = torch.floor(x).long()
x1 = x0 + 1
y0 = torch.floor(y).long()
y1 = y0 + 1
x0 = torch.clamp(x0, 0, im.shape[1] - 1)
x1 = torch.clamp(x1, 0, im.shape[1] - 1)
y0 = torch.clamp(y0, 0, im.shape[0] - 1)
y1 = torch.clamp(y1, 0, im.shape[0] - 1)
Ia = im[y0, x0]
Ib = im[y1, x0]
Ic = im[y0, x1]
Id = im[y1, x1]
wa = (x1.type_as(x) - x) * (y1.type_as(y) - y)
wb = (x1.type_as(x) - x) * (y - y0.type_as(y))
wc = (x - x0.type_as(x)) * (y1.type_as(y) - y)
wd = (x - x0.type_as(x)) * (y - y0.type_as(y))
ans = torch.t((torch.t(Ia) * wa)) + torch.t(torch.t(Ib) * wb) + torch.t(torch.t(Ic) * wc) + torch.t(torch.t(Id) * wd)
return ans
def interpolate_from_bev_features(keypoints, bev_features, batch_size, bev_stride):
"""
Args:
keypoints: (N1 + N2 + ..., 4)
bev_features: (B, C, H, W)
batch_size:
bev_stride:
Returns:
point_bev_features: (N1 + N2 + ..., C)
"""
# voxel_size = [0.05, 0.05, 0.1] # KITTI
voxel_size = [0.1, 0.1, 0.2] # nuScenes
# point_cloud_range = np.array([0., -40., -3., 70.4, 40., 1.], dtype=np.float32) # KITTI
point_cloud_range = np.array([-51.2, -51.2, -5.0, 51.2, 51.2, 3.0], dtype=np.float32) # nuScenes
x_idxs = (keypoints[:, 1] - point_cloud_range[0]) / voxel_size[0]
y_idxs = (keypoints[:, 2] - point_cloud_range[1]) / voxel_size[1]
x_idxs = x_idxs / bev_stride
y_idxs = y_idxs / bev_stride
point_bev_features_list = []
for k in range(batch_size):
bs_mask = (keypoints[:, 0] == k)
cur_x_idxs = x_idxs[bs_mask]
cur_y_idxs = y_idxs[bs_mask]
cur_bev_features = bev_features[k].permute(1, 2, 0) # (H, W, C)
point_bev_features = bilinear_interpolate_torch(cur_bev_features, cur_x_idxs, cur_y_idxs)
point_bev_features_list.append(point_bev_features)
point_bev_features = torch.cat(point_bev_features_list, dim=0) # (N1 + N2 + ..., C)
return point_bev_features
class LightningPretrainSpconv(pl.LightningModule):
def __init__(self, model_points, model_images, config):
super().__init__()
self.model_points = model_points
self.model_images = model_images
self._config = config
self.losses = config["losses"]
self.train_losses = []
self.val_losses = []
self.num_matches = config["num_matches"]
self.batch_size = config["batch_size"]
self.num_epochs = config["num_epochs"]
self.superpixel_size = config["superpixel_size"]
self.epoch = 0
if config["resume_path"] is not None:
self.epoch = int(
re.search(r"(?<=epoch=)[0-9]+", config["resume_path"])[0]
)
self.criterion = NCELoss(temperature=config["NCE_temperature"])
self.working_dir = os.path.join(config["working_dir"], config["datetime"])
if os.environ.get("LOCAL_RANK", 0) == 0:
os.makedirs(self.working_dir, exist_ok=True)
def configure_optimizers(self):
optimizer = optim.SGD(
list(self.model_points.parameters()) + list(self.model_images.parameters()),
lr=self._config["lr"],
momentum=self._config["sgd_momentum"],
dampening=self._config["sgd_dampening"],
weight_decay=self._config["weight_decay"],
)
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, self.num_epochs)
return [optimizer], [scheduler]
def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx):
optimizer.zero_grad(set_to_none=True)
def training_step(self, batch, batch_idx):
output_points = self.model_points(batch["voxels"], batch["coordinates"])
output_points = interpolate_from_bev_features(batch["pc"], output_points, self.batch_size, self.model_points.bev_stride)
self.model_images.eval()
self.model_images.decoder.train()
output_images = self.model_images(batch["input_I"])
del batch["voxels"]
del batch["coordinates"]
# each loss is applied independently on each GPU
losses = [
getattr(self, loss)(batch, output_points, output_images)
for loss in self.losses
]
loss = torch.mean(torch.stack(losses))
torch.cuda.empty_cache()
self.log(
"train_loss", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True, batch_size=self.batch_size
)
self.train_losses.append(loss.detach().cpu())
return loss
def loss(self, batch, output_points, output_images):
pairing_points = batch["pairing_points"]
pairing_images = batch["pairing_images"]
idx = np.random.choice(pairing_points.shape[0], self.num_matches, replace=False)
k = output_points[pairing_points[idx]]
m = tuple(pairing_images[idx].T.long())
q = output_images.permute(0, 2, 3, 1)[m]
return self.criterion(k, q)
def loss_superpixels_average(self, batch, output_points, output_images):
# compute a superpoint-to-superpixel loss using superpixels
torch.cuda.empty_cache() # This method is extremely memory intensive
superpixels = batch["superpixels"]
pairing_images = batch["pairing_images"]
pairing_points = batch["pairing_points"]
superpixels = (
torch.arange(
0,
output_images.shape[0] * self.superpixel_size,
self.superpixel_size,
device=self.device,
)[:, None, None] + superpixels
)
m = tuple(pairing_images.cpu().T.long())
superpixels_I = superpixels.flatten()
idx_P = torch.arange(pairing_points.shape[0], device=superpixels.device)
total_pixels = superpixels_I.shape[0]
idx_I = torch.arange(total_pixels, device=superpixels.device)
with torch.no_grad():
one_hot_P = torch.sparse_coo_tensor(
torch.stack((
superpixels[m], idx_P
), dim=0),
torch.ones(pairing_points.shape[0], device=superpixels.device),
(superpixels.shape[0] * self.superpixel_size, pairing_points.shape[0])
)
one_hot_I = torch.sparse_coo_tensor(
torch.stack((
superpixels_I, idx_I
), dim=0),
torch.ones(total_pixels, device=superpixels.device),
(superpixels.shape[0] * self.superpixel_size, total_pixels)
)
k = one_hot_P @ output_points[pairing_points]
k = k / (torch.sparse.sum(one_hot_P, 1).to_dense()[:, None] + 1e-6)
q = one_hot_I @ output_images.permute(0, 2, 3, 1).flatten(0, 2)
q = q / (torch.sparse.sum(one_hot_I, 1).to_dense()[:, None] + 1e-6)
mask = torch.where(k[:, 0] != 0)
k = k[mask]
q = q[mask]
return self.criterion(k, q)
def training_epoch_end(self, outputs):
self.epoch += 1
if self.epoch == self.num_epochs:
self.save()
return super().training_epoch_end(outputs)
def validation_step(self, batch, batch_idx):
output_points = self.model_points(batch["voxels"], batch["coordinates"])
output_points = interpolate_from_bev_features(batch["pc"], output_points, self.batch_size, self.model_points.bev_stride)
self.model_images.eval()
output_images = self.model_images(batch["input_I"])
losses = [
getattr(self, loss)(batch, output_points, output_images)
for loss in self.losses
]
loss = torch.mean(torch.stack(losses))
self.val_losses.append(loss.detach().cpu())
self.log(
"val_loss", loss, on_epoch=True, prog_bar=True, logger=True, sync_dist=True, batch_size=self.batch_size
)
return loss
@rank_zero_only
def save(self):
path = os.path.join(self.working_dir, "model.pt")
torch.save(
{
"model_points": self.model_points.state_dict(),
"model_images": self.model_images.state_dict(),
"epoch": self.epoch,
"config": self._config,
},
path,
)
| 8,642 | 35.778723 | 128 |
py
|
CLIP2Scene
|
CLIP2Scene-main/pretrain/pc_utils.py
|
""" Utility functions for processing point clouds.
Author: Charles R. Qi, Hao Su
Date: November 2016
"""
import os
import sys
import warnings
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
# Draw point cloud
from eulerangles import euler2mat
import math
# Point cloud IO
import numpy as np
from plyfile import PlyData, PlyElement
import torch
import random
# ----------------------------------------
# Point Cloud/Volume Conversions
# ----------------------------------------
def point_cloud_to_volume_batch(point_clouds, vsize=12, radius=1.0, flatten=True):
""" Input is BxNx3 batch of point cloud
Output is Bx(vsize^3)
"""
vol_list = []
for b in range(point_clouds.shape[0]):
vol = point_cloud_to_volume(np.squeeze(point_clouds[b,:,:]), vsize, radius)
if flatten:
vol_list.append(vol.flatten())
else:
vol_list.append(np.expand_dims(np.expand_dims(vol, -1), 0))
if flatten:
return np.vstack(vol_list)
else:
return np.concatenate(vol_list, 0)
def point_cloud_to_volume(points, vsize, radius=1.0):
""" input is Nx3 points.
output is vsize*vsize*vsize
assumes points are in range [-radius, radius]
"""
vol = np.zeros((vsize,vsize,vsize))
voxel = 2*radius/float(vsize)
locations = (points + radius)/voxel
locations = locations.astype(int)
vol[locations[:,0],locations[:,1],locations[:,2]] = 1.0
return vol
#a = np.zeros((16,1024,3))
#print point_cloud_to_volume_batch(a, 12, 1.0, False).shape
def volume_to_point_cloud(vol):
""" vol is occupancy grid (value = 0 or 1) of size vsize*vsize*vsize
return Nx3 numpy array.
"""
vsize = vol.shape[0]
assert(vol.shape[1] == vsize and vol.shape[2] == vsize)
points = []
for a in range(vsize):
for b in range(vsize):
for c in range(vsize):
if vol[a,b,c] == 1:
points.append(np.array([a,b,c]))
if len(points) == 0:
return np.zeros((0,3))
points = np.vstack(points)
return points
def point_cloud_to_volume_v2_batch(point_clouds, vsize=12, radius=1.0, num_sample=128):
""" Input is BxNx3 a batch of point cloud
Output is BxVxVxVxnum_samplex3
Added on Feb 19
"""
vol_list = []
for b in range(point_clouds.shape[0]):
vol = point_cloud_to_volume_v2(point_clouds[b,:,:], vsize, radius, num_sample)
vol_list.append(np.expand_dims(vol, 0))
return np.concatenate(vol_list, 0)
def point_cloud_to_volume_v2(points, vsize, radius=1.0, num_sample=128):
""" input is Nx3 points
output is vsize*vsize*vsize*num_sample*3
assumes points are in range [-radius, radius]
samples num_sample points in each voxel; if there are fewer than
num_sample points, replicate the points
Added on Feb 19
"""
vol = np.zeros((vsize,vsize,vsize,num_sample,3))
voxel = 2*radius/float(vsize)
locations = (points + radius)/voxel
locations = locations.astype(int)
loc2pc = {}
for n in range(points.shape[0]):
loc = tuple(locations[n,:])
if loc not in loc2pc:
loc2pc[loc] = []
loc2pc[loc].append(points[n,:])
#print loc2pc
for i in range(vsize):
for j in range(vsize):
for k in range(vsize):
if (i,j,k) not in loc2pc:
vol[i,j,k,:,:] = np.zeros((num_sample,3))
else:
pc = loc2pc[(i,j,k)] # a list of (3,) arrays
pc = np.vstack(pc) # kx3
# Sample/pad to num_sample points
if pc.shape[0]>num_sample:
choices = np.random.choice(pc.shape[0], num_sample, replace=False)
pc = pc[choices,:]
elif pc.shape[0]<num_sample:
pc = np.lib.pad(pc, ((0,num_sample-pc.shape[0]),(0,0)), 'edge')
# Normalize
pc_center = (np.array([i,j,k])+0.5)*voxel - radius
#print 'pc center: ', pc_center
pc = (pc - pc_center) / voxel # shift and scale
vol[i,j,k,:,:] = pc
#print (i,j,k), vol[i,j,k,:,:]
return vol
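# Hedged usage sketch (added for illustration, not part of the original utilities):
# grid a random cloud into fixed-size, locally normalized per-voxel point sets.
# The sizes below are made-up example values.
def _example_per_voxel_sampling():
    np.random.seed(0)
    pts = np.random.uniform(-0.99, 0.99, size=(2048, 3))
    vol = point_cloud_to_volume_v2(pts, vsize=8, radius=1.0, num_sample=32)
    return vol.shape  # (8, 8, 8, 32, 3): 32 points per voxel, shifted and scaled per cell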
def point_cloud_to_image_batch(point_clouds, imgsize, radius=1.0, num_sample=128):
""" Input is BxNx3 a batch of point cloud
Output is BxIxIxnum_samplex3
Added on Feb 19
"""
img_list = []
for b in range(point_clouds.shape[0]):
img = point_cloud_to_image(point_clouds[b,:,:], imgsize, radius, num_sample)
img_list.append(np.expand_dims(img, 0))
return np.concatenate(img_list, 0)
def point_cloud_to_image(points, imgsize, radius=1.0, num_sample=128):
""" input is Nx3 points
output is imgsize*imgsize*num_sample*3
assumes points are in range [-radius, radius]
samples num_sample points in each pixel, if there are less than
num_sample points, replicate the points
Added on Feb 19
"""
img = np.zeros((imgsize, imgsize, num_sample, 3))
pixel = 2*radius/float(imgsize)
locations = (points[:,0:2] + radius)/pixel # Nx2
locations = locations.astype(int)
loc2pc = {}
for n in range(points.shape[0]):
loc = tuple(locations[n,:])
if loc not in loc2pc:
loc2pc[loc] = []
loc2pc[loc].append(points[n,:])
for i in range(imgsize):
for j in range(imgsize):
if (i,j) not in loc2pc:
img[i,j,:,:] = np.zeros((num_sample,3))
else:
pc = loc2pc[(i,j)]
pc = np.vstack(pc)
if pc.shape[0]>num_sample:
choices = np.random.choice(pc.shape[0], num_sample, replace=False)
pc = pc[choices,:]
elif pc.shape[0]<num_sample:
pc = np.lib.pad(pc, ((0,num_sample-pc.shape[0]),(0,0)), 'edge')
pc_center = (np.array([i,j])+0.5)*pixel - radius
pc[:,0:2] = (pc[:,0:2] - pc_center)/pixel
img[i,j,:,:] = pc
return img
def surface_normal_area(face, vertex):
normals = list()
areas = list()
vertex_to_face = [[] for i in range(len(vertex))]
for fid, f in enumerate(face):
f = f[0]
va, vb, vc = f[0], f[1], f[2]
vertex_to_face[va].append(fid)
vertex_to_face[vb].append(fid)
vertex_to_face[vc].append(fid)
a = vertex[vb] - vertex[va]
b = vertex[vc] - vertex[va]
normal = np.cross(a, b)
area = np.dot(normal, normal) / 2.0
normalized_normal = normal / np.linalg.norm(normal)
normals.append(normalized_normal)
areas.append(area)
return np.array(normals), np.array(areas), vertex_to_face
def vertex_normal(vertex_to_face, normal, areas):
vertex_normals = list()
num_vertex = len(vertex_to_face)
for vid in range(num_vertex):
adj_faces = vertex_to_face[vid]
        if len(adj_faces)==0: # isolated vertex with no adjacent faces
vertex_normals.append([0,0,1])
continue
adj_faces_area = np.expand_dims(np.array(areas[adj_faces]), axis=-1)
adj_faces_normal = np.array(normal[adj_faces])
avg_normal = (adj_faces_normal * adj_faces_area) / np.sum(adj_faces_area)
avg_normal = np.sum(avg_normal, axis=0)
normalized_normal = avg_normal / np.linalg.norm(avg_normal)
#if np.isclose(np.linalg.norm(avg_normal), 0.0):
# print('-------------------')
# print(len(adj_faces))
# print('-------------------')
# print('-------------------')
# print(adj_faces_area.shape, adj_faces_normal.shape, adj_faces_area, adj_faces_normal)
# print(adj_faces_normal * adj_faces_area)
# print(np.sum(adj_faces_area))
# print((adj_faces_normal * adj_faces_area) / np.sum(adj_faces_area))
# print(avg_normal, np.linalg.norm(avg_normal), adj_faces_area, adj_faces_normal)
# print('-------------------')
vertex_normals.append(normalized_normal)
return np.array(vertex_normals)
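# Hedged usage sketch (added for illustration, not part of the original utilities):
# area-weighted vertex normals for a tiny two-triangle square in the z=0 plane.
# The mesh is made up; faces use the same nested layout as plyfile's 'face' element,
# i.e. f[0] holds the vertex-index list.
def _example_vertex_normals():
    vertices = np.array([[0., 0., 0.], [1., 0., 0.], [1., 1., 0.], [0., 1., 0.]])
    faces = [(np.array([0, 1, 2]),), (np.array([0, 2, 3]),)]
    face_normals, face_areas, vertex_to_face = surface_normal_area(faces, vertices)
    return vertex_normal(vertex_to_face, face_normals, face_areas)  # all rows are (0, 0, 1)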
# ----------------------------------------
# Point cloud IO
# ----------------------------------------
def read_ply(filename):
""" read XYZ point cloud from filename PLY file """
plydata = PlyData.read(filename)
pc = plydata['vertex'].data
pc_array = np.array([[x, y, z] for x,y,z in pc])
return pc_array
def read_ply_rgba(filename):
""" read XYZRGBA point cloud from filename PLY file """
plydata = PlyData.read(filename)
pc = plydata['vertex'].data
pc_array = np.array([[x, y, z,r,g,b,a] for x,y,z,r,g,b,a in pc])
return pc_array
def read_ply_rgba_normal(filename):
""" read XYZRGBA and NxNyNz point cloud from filename PLY file """
plydata = PlyData.read(filename)
pc = plydata['vertex'].data
pc_array = np.array([[x, y, z,r,g,b,a] for x,y,z,r,g,b,a in pc])
face = plydata['face'].data
f_n, f_a, v_f = surface_normal_area(face, pc_array[:, 0:3])
v_n = vertex_normal(v_f, f_n, f_a)
pc_array = np.concatenate((pc_array, v_n), axis=-1)
return pc_array
def write_ply(points, filename, text=True):
""" input: Nx3, write points to filename as PLY format. """
points = [(points[i,0], points[i,1], points[i,2]) for i in range(points.shape[0])]
vertex = np.array(points, dtype=[('x', 'f4'), ('y', 'f4'),('z', 'f4')])
el = PlyElement.describe(vertex, 'vertex', comments=['vertices'])
PlyData([el], text=text).write(filename)
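# Hedged usage sketch (added for illustration, not part of the original utilities):
# write a small random cloud to a temporary PLY file and read it back.
# The file name is a made-up example.
def _example_ply_roundtrip():
    import tempfile
    pts = np.random.rand(16, 3).astype(np.float32)
    path = os.path.join(tempfile.gettempdir(), "pc_utils_example.ply")
    write_ply(pts, path, text=True)
    return read_ply(path)  # (16, 3) array with the same coordinates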
def write_ply_rgb(points, colors, filename, text=True):
""" input: Nx3, Nx3 write points and colors to filename as PLY format. """
num_points = len(points)
assert len(colors) == num_points
points = [(points[i,0], points[i,1], points[i,2]) for i in range(points.shape[0])]
colors = [(colors[i,0], colors[i,1], colors[i,2]) for i in range(colors.shape[0])]
vertex = np.array(points, dtype=[('x', 'f4'), ('y', 'f4'),('z', 'f4')])
color = np.array(colors, dtype=[('red', 'u1'), ('green', 'u1'),('blue', 'u1')])
vertex_all = np.empty(num_points, vertex.dtype.descr + color.dtype.descr)
for prop in vertex.dtype.names:
vertex_all[prop] = vertex[prop]
for prop in color.dtype.names:
vertex_all[prop] = color[prop]
el = PlyElement.describe(vertex_all, 'vertex', comments=['vertices'])
PlyData([el], text=text).write(filename)
def write_ply_rgb_normal(points, colors, normals, filename, text=True):
""" input: Nx3, Nx3, Nx3 write points and colors to filename as PLY format. """
num_points = len(points)
assert len(colors) == num_points
points = [(points[i,0], points[i,1], points[i,2]) for i in range(points.shape[0])]
colors = [(colors[i,0], colors[i,1], colors[i,2]) for i in range(colors.shape[0])]
normals = [(normals[i,0], normals[i,1], normals[i,2]) for i in range(normals.shape[0])]
vertex = np.array(points, dtype=[('x', 'f4'), ('y', 'f4'),('z', 'f4')])
color = np.array(colors, dtype=[('red', 'u1'), ('green', 'u1'),('blue', 'u1')])
normal = np.array(normals, dtype=[('nx', 'f4'), ('ny', 'f4'),('nz', 'f4')])
vertex_all = np.empty(num_points, vertex.dtype.descr + color.dtype.descr + normal.dtype.descr)
for prop in vertex.dtype.names:
vertex_all[prop] = vertex[prop]
for prop in color.dtype.names:
vertex_all[prop] = color[prop]
for prop in normal.dtype.names:
vertex_all[prop] = normal[prop]
el = PlyElement.describe(vertex_all, 'vertex', comments=['vertices'])
PlyData([el], text=text).write(filename)
# ----------------------------------------
# Simple Point cloud and Volume Renderers
# ----------------------------------------
def draw_point_cloud(input_points, canvasSize=500, space=200, diameter=25,
xrot=0, yrot=0, zrot=0, switch_xyz=[0,1,2], normalize=True):
""" Render point cloud to image with alpha channel.
Input:
points: Nx3 numpy array (+y is up direction)
Output:
gray image as numpy array of size canvasSizexcanvasSize
"""
image = np.zeros((canvasSize, canvasSize))
if input_points is None or input_points.shape[0] == 0:
return image
points = input_points[:, switch_xyz]
M = euler2mat(zrot, yrot, xrot)
points = (np.dot(M, points.transpose())).transpose()
# Normalize the point cloud
# We normalize scale to fit points in a unit sphere
if normalize:
centroid = np.mean(points, axis=0)
points -= centroid
furthest_distance = np.max(np.sqrt(np.sum(abs(points)**2,axis=-1)))
points /= furthest_distance
# Pre-compute the Gaussian disk
radius = (diameter-1)/2.0
disk = np.zeros((diameter, diameter))
for i in range(diameter):
for j in range(diameter):
if (i - radius) * (i-radius) + (j-radius) * (j-radius) <= radius * radius:
disk[i, j] = np.exp((-(i-radius)**2 - (j-radius)**2)/(radius**2))
mask = np.argwhere(disk > 0)
dx = mask[:, 0]
dy = mask[:, 1]
dv = disk[disk > 0]
# Order points by z-buffer
zorder = np.argsort(points[:, 2])
points = points[zorder, :]
points[:, 2] = (points[:, 2] - np.min(points[:, 2])) / (np.max(points[:, 2] - np.min(points[:, 2])))
max_depth = np.max(points[:, 2])
for i in range(points.shape[0]):
j = points.shape[0] - i - 1
x = points[j, 0]
y = points[j, 1]
xc = canvasSize/2 + (x*space)
yc = canvasSize/2 + (y*space)
xc = int(np.round(xc))
yc = int(np.round(yc))
px = dx + xc
py = dy + yc
image[px, py] = image[px, py] * 0.7 + dv * (max_depth - points[j, 2]) * 0.3
image = image / np.max(image)
return image
def point_cloud_three_views(points):
""" input points Nx3 numpy array (+y is up direction).
return an numpy array gray image of size 500x1500. """
# +y is up direction
# xrot is azimuth
# yrot is in-plane
# zrot is elevation
img1 = draw_point_cloud(points, zrot=110/180.0*np.pi, xrot=45/180.0*np.pi, yrot=0/180.0*np.pi)
img2 = draw_point_cloud(points, zrot=70/180.0*np.pi, xrot=135/180.0*np.pi, yrot=0/180.0*np.pi)
img3 = draw_point_cloud(points, zrot=180.0/180.0*np.pi, xrot=90/180.0*np.pi, yrot=0/180.0*np.pi)
image_large = np.concatenate([img1, img2, img3], 1)
return image_large
def point_cloud_three_views_demo():
""" Demo for draw_point_cloud function """
from PIL import Image
points = read_ply('../third_party/mesh_sampling/piano.ply')
im_array = point_cloud_three_views(points)
img = Image.fromarray(np.uint8(im_array*255.0))
img.save('piano.jpg')
if __name__=="__main__":
point_cloud_three_views_demo()
def pyplot_draw_point_cloud(points, output_filename):
""" points is a Nx3 numpy array """
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(points[:,0], points[:,1], points[:,2])
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
#savefig(output_filename)
def pyplot_draw_volume(vol, output_filename):
""" vol is of size vsize*vsize*vsize
output an image to output_filename
"""
points = volume_to_point_cloud(vol)
pyplot_draw_point_cloud(points, output_filename)
def write_ply_color(points, labels, out_filename, num_classes=None, colors=None):
""" Color (N,3) points with labels (N) within range 0 ~ num_classes-1 as OBJ file """
import matplotlib.pyplot as pyplot
labels = labels.astype(int)
N = points.shape[0]
if num_classes is None:
num_classes = np.max(labels)+1
print(num_classes)
else:
assert(num_classes>np.max(labels))
if colors is None:
#colors = [pyplot.cm.hsv(i/float(num_classes)) for i in range(num_classes)]
colors = [pyplot.cm.jet(i/float(num_classes)) for i in range(num_classes)]
fout = open(out_filename, 'w')
for i in range(N):
        c = [int(x * 255) for x in colors[labels[i]]]  # colormap entries are floats in [0, 1]
        fout.write('v %f %f %f %d %d %d\n' % (points[i,0],points[i,1],points[i,2],c[0],c[1],c[2]))
fout.close()
def farthest_pts_sampling_abuse(pts, num_samples):
'''
naive method
:param pts: n x 3 ndarray
:param num_samples:
:return: num_samples x 3 ndarray
'''
diff = pts[:, None, :] - pts[None, :, :]
# dis_mat = np.sum(diff * diff, axis=2)
dis_mat = np.linalg.norm(diff, axis=2)
N = num_samples
perm = np.zeros(N, dtype=np.int64)
lambdas = np.zeros(N)
ds = dis_mat[0, :]
for i in range(1, N):
idx = np.argmax(ds)
perm[i] = idx
lambdas[i] = ds[idx]
ds = np.minimum(ds, dis_mat[idx, :])
return pts[perm, :]
def farthest_pts_sampling(coords, num_samples):
    '''
    naive greedy farthest point sampling, O(n * num_samples)
    :param coords: n x 3 torch tensor
    :param num_samples:
    :return: num_samples x 3 ndarray
    '''
    pts = coords.numpy()
    point_set = []
    # start from a random seed point
    index = random.randint(0, pts.shape[0] - 1)
    point_set.append(pts[index])
    # distance from every point to its nearest already-selected point
    min_dist = np.linalg.norm(pts - pts[index][None, :], axis=1)
    for _ in range(1, num_samples):
        # greedily pick the point farthest from the current selection
        index = np.argmax(min_dist)
        point_set.append(pts[index])
        min_dist = np.minimum(min_dist, np.linalg.norm(pts - pts[index][None, :], axis=1))
    point_set = np.vstack(point_set)
    return point_set
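# Hedged usage sketch (added for illustration, not part of the original utilities):
# sub-sample a random cloud with the naive farthest point sampling above.
def _example_fps():
    torch.manual_seed(0)
    cloud = torch.rand(256, 3)
    return farthest_pts_sampling(cloud, num_samples=16)  # (16, 3) ndarray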
def random_partition(coords):
# print('1')
mask = torch.ones(coords.size()[0]).numpy()
coords_np = coords.numpy()
sample_num = random.randint(2, 5)
random_index = np.random.randint(coords_np.shape[0], size=sample_num)
sample_points = coords_np[random_index, :]
diff = coords_np[:, None, :] - sample_points[None, :, :]
diff = np.linalg.norm(diff, axis=2)
partitions = np.argmin(diff, axis=1)
filter_ind = random.randint(0, sample_num - 1)
# coords_torch = torch.from_numpy(coords_np[partitions != filter_ind])
coords_torch = coords
mask[partitions == filter_ind] = 0
mask = torch.from_numpy(mask)
# print('4')
# part1 = torch.from_numpy(coords_np[partitions == filter_ind])
# part2 = torch.from_numpy(coords_np[partitions != filter_ind])
return coords_torch, mask
# return part1, part2
def random_rotation(coords):
# scale = torch.eye(3)*random.uniform(0.95, 1.05)
scale_flip = np.eye(3) + np.random.randn(3, 3) * 0.1
scale_flip[0][0] *= np.random.randint(0, 2) * 2 - 1
scale_flip = torch.from_numpy(scale_flip).float()
# scale = torch.eye(3)
theta = random.uniform(0, 2) * math.pi
rotationx = torch.tensor([[math.cos(theta), math.sin(theta), 0],
[-math.sin(theta), math.cos(theta), 0],
[0, 0, 1]]).float()
# rotationy = torch.tensor([[math.cos(theta), 0, math.sin(theta)],
# [0, 1, 0],
# [math.sin(theta), 0, -math.cos(theta)]]).float()
#
# rotationz = torch.tensor([[1, 0, 0],
# [0, math.cos(theta), math.sin(theta)],
# [0, -math.sin(theta), math.cos(theta)]]).float()
m = torch.matmul(scale_flip, rotationx)
coords = torch.matmul(coords.float(), m)
return coords
# def random_rotation(coords):
# return coords
def resize_rotation(coords, item):
scale = 0
if item == 'chair':
scale = torch.eye(3) * 0.8
elif item == 'sofa':
scale = torch.eye(3) * 1.75
elif item == 'table':
scale = torch.eye(3) * 1.65
elif item == 'bookshelf':
scale = torch.eye(3) * 1.7
elif item == 'desk':
scale = torch.eye(3) * 1.25
elif item == 'bed':
scale = torch.eye(3) * 2.1
elif item == 'sink':
scale = torch.eye(3) * 1.05
elif item == 'bathtub':
scale = torch.eye(3) * 1.25
elif item == 'toilet':
scale = torch.eye(3) * 0.65
elif item == 'door':
scale = torch.eye(3) * 1.8
elif item == 'curtain':
scale = torch.eye(3) * 2
else :
scale = torch.eye(3) * random.uniform(0.9, 1.75)
'''
if item == 'chair':
scale = torch.eye(3) * random.uniform(5, 5.5)
elif item == 'bed':
scale = torch.eye(3) * random.uniform(1.4, 1.6)
elif item == 'sofa':
scale = torch.eye(3) * random.uniform(9, 9.5)
elif item == 'table':
scale = torch.eye(3) * random.uniform(8, 8.5)
elif item == 'bookshelf':
scale = torch.eye(3) * random.uniform(1.1, 1.2)
elif item == 'desk':
scale = torch.eye(3) * random.uniform(7, 7.5)
elif item == 'nega_data':
scale = torch.eye(3) * random.uniform(5, 8)
'''
# theta = 0 * math.pi
# rotationx = torch.tensor([[math.cos(theta), math.sin(theta), 0],
# [-math.sin(theta), math.cos(theta), 0],
# [0, 0, 1]]).float()
#
# rotationy = torch.tensor([[math.cos(theta), 0, math.sin(theta)],
# [0, 1, 0],
# [math.sin(theta), 0, -math.cos(theta)]]).float()
# rotationz = torch.tensor([[1, 0, 0],
# [0, math.cos(theta), math.sin(theta)],
# [0, -math.sin(theta), math.cos(theta)]]).float()
# m = torch.matmul(scale, rotationz)
m = scale
coords = torch.matmul(coords.float(), m)
return coords
| 21,734 | 35.529412 | 104 |
py
|
CLIP2Scene
|
CLIP2Scene-main/pretrain/__init__.py
| 0 | 0 | 0 |
py
|
|
CLIP2Scene
|
CLIP2Scene-main/pretrain/criterion.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
class NCELoss(nn.Module):
"""
Compute the PointInfoNCE loss
"""
def __init__(self, temperature):
super(NCELoss, self).__init__()
self.temperature = temperature
self.criterion = nn.CrossEntropyLoss()
def forward(self, k, q):
logits = torch.mm(k, q.transpose(1, 0))
# print(logits)
target = torch.arange(k.shape[0], device=k.device).long()
out = torch.div(logits, self.temperature)
out = out.contiguous()
loss = self.criterion(out, target)
return loss
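# Hedged usage sketch (added for illustration, not part of the original training code):
# the PointInfoNCE loss expects two aligned (B, D) feature batches, one per modality,
# where row i of `k` and row i of `q` form the positive pair. Sizes are made up.
def _example_nce_loss():
    torch.manual_seed(0)
    point_feats = F.normalize(torch.randn(8, 64), dim=1)
    pixel_feats = F.normalize(torch.randn(8, 64), dim=1)
    criterion = NCELoss(temperature=0.07)
    return criterion(point_feats, pixel_feats)  # scalar contrastive loss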
class semantic_NCELoss(nn.Module):
"""
Compute the PointInfoNCE loss
"""
def __init__(self, temperature):
super(semantic_NCELoss, self).__init__()
self.temperature = temperature
self.criterion = nn.CrossEntropyLoss()
def forward(self, k, q, pseudo_label):
logits = torch.mm(k, q.transpose(1, 0))
# print(logits)
target = torch.arange(k.shape[0], device=k.device).long()
logits = torch.div(logits, self.temperature)
# out = out.contiguous()
permute = pseudo_label.unsqueeze(-1).repeat(1, pseudo_label.shape[0])
mask = permute == permute.permute(1, 0)
mask_diag = torch.diag_embed(torch.Tensor([True] * pseudo_label.shape[0])).to(k.device).bool()
mask = mask & (~mask_diag)
logits[mask] = 0
logits_sparse = logits.to_sparse()
logits_sparse = torch.sparse.log_softmax(logits_sparse, dim=1).to_dense()
# d_sparse = d.to_sparse()
# torch.sparse.log_softmax(d_sparse, dim=0)
# torch.sparse.log_softmax(d_sparse, dim=1).to_dense()
# import pdb
# pdb.set_trace()
loss = F.nll_loss(logits_sparse, target)
# loss = self.criterion(out, target)
return loss
class DistillKL(nn.Module):
"""Distilling the Knowledge in a Neural Network"""
def __init__(self, T):
super(DistillKL, self).__init__()
self.T = T
def forward(self, y_s, y_t):
p_s = F.log_softmax(y_s/self.T, dim=1)
p_t = F.softmax(y_t/self.T, dim=1)
        loss = F.kl_div(p_s, p_t, reduction='sum') * (self.T**2) / y_s.shape[0]
return loss
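# Hedged usage sketch (added for illustration, not part of the original training code):
# DistillKL matches temperature-softened student logits to teacher logits; the
# batch size, class count and temperature below are made up.
def _example_distill_kl():
    torch.manual_seed(0)
    student_logits = torch.randn(4, 10)
    teacher_logits = torch.randn(4, 10)
    return DistillKL(T=4.0)(student_logits, teacher_logits)  # scalar KD loss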
eps = 1e-7
class CRDLoss(nn.Module):
"""CRD Loss function
includes two symmetric parts:
(a) using teacher as anchor, choose positive and negatives over the student side
(b) using student as anchor, choose positive and negatives over the teacher side
Args:
opt.s_dim: the dimension of student's feature
opt.t_dim: the dimension of teacher's feature
opt.feat_dim: the dimension of the projection space
opt.nce_k: number of negatives paired with each positive
opt.nce_t: the temperature
opt.nce_m: the momentum for updating the memory buffer
        opt.n_data: the number of samples in the training set, therefore the memory buffer is: opt.n_data x opt.feat_dim
"""
def __init__(self, opt):
super(CRDLoss, self).__init__()
self.embed_s = Embed(opt.s_dim, opt.feat_dim)
self.embed_t = Embed(opt.t_dim, opt.feat_dim)
self.contrast = ContrastMemory(opt.feat_dim, opt.n_data, opt.nce_k, opt.nce_t, opt.nce_m)
self.criterion_t = ContrastLoss(opt.n_data)
self.criterion_s = ContrastLoss(opt.n_data)
def forward(self, f_s, f_t, idx, contrast_idx=None):
"""
Args:
f_s: the feature of student network, size [batch_size, s_dim]
f_t: the feature of teacher network, size [batch_size, t_dim]
idx: the indices of these positive samples in the dataset, size [batch_size]
contrast_idx: the indices of negative samples, size [batch_size, nce_k]
Returns:
The contrastive loss
"""
f_s = self.embed_s(f_s)
f_t = self.embed_t(f_t)
out_s, out_t = self.contrast(f_s, f_t, idx, contrast_idx)
s_loss = self.criterion_s(out_s)
t_loss = self.criterion_t(out_t)
loss = s_loss + t_loss
return loss
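# Hedged usage sketch (added for illustration, not part of the original training code):
# CRDLoss is configured through an `opt` namespace with the fields listed in the
# docstring above. All dimensions are made up, and a CUDA device is required because
# ContrastMemory moves its alias sampler to the GPU in __init__.
def _example_crd_loss():
    from types import SimpleNamespace
    if not torch.cuda.is_available():
        return None
    opt = SimpleNamespace(s_dim=32, t_dim=48, feat_dim=16, nce_k=4, nce_t=0.07, nce_m=0.5, n_data=100)
    crd = CRDLoss(opt).cuda()
    f_s = torch.randn(8, opt.s_dim).cuda()            # student features
    f_t = torch.randn(8, opt.t_dim).cuda()            # teacher features
    idx = torch.randint(0, opt.n_data, (8,)).cuda()   # dataset indices of this batch
    return crd(f_s, f_t, idx)                         # scalar CRD loss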
class ContrastLoss(nn.Module):
"""
contrastive loss, corresponding to Eq (18)
"""
def __init__(self, n_data):
super(ContrastLoss, self).__init__()
self.n_data = n_data
def forward(self, x):
bsz = x.shape[0]
m = x.size(1) - 1
# noise distribution
Pn = 1 / float(self.n_data)
# loss for positive pair
P_pos = x.select(1, 0)
log_D1 = torch.div(P_pos, P_pos.add(m * Pn + eps)).log_()
# loss for K negative pair
P_neg = x.narrow(1, 1, m)
log_D0 = torch.div(P_neg.clone().fill_(m * Pn), P_neg.add(m * Pn + eps)).log_()
loss = - (log_D1.sum(0) + log_D0.view(-1, 1).sum(0)) / bsz
return loss
class Embed(nn.Module):
"""Embedding module"""
def __init__(self, dim_in=1024, dim_out=128):
super(Embed, self).__init__()
self.linear = nn.Linear(dim_in, dim_out)
self.l2norm = Normalize(2)
def forward(self, x):
x = x.view(x.shape[0], -1)
x = self.linear(x)
x = self.l2norm(x)
return x
class Normalize(nn.Module):
"""normalization layer"""
def __init__(self, power=2):
super(Normalize, self).__init__()
self.power = power
def forward(self, x):
norm = x.pow(self.power).sum(1, keepdim=True).pow(1. / self.power)
out = x.div(norm)
return out
class ContrastMemory(nn.Module):
"""
memory buffer that supplies large amount of negative samples.
"""
def __init__(self, inputSize, outputSize, K, T=0.07, momentum=0.5):
super(ContrastMemory, self).__init__()
self.nLem = outputSize
self.unigrams = torch.ones(self.nLem)
self.multinomial = AliasMethod(self.unigrams)
self.multinomial.cuda()
self.K = K
self.register_buffer('params', torch.tensor([K, T, -1, -1, momentum]))
stdv = 1. / math.sqrt(inputSize / 3)
self.register_buffer('memory_v1', torch.rand(outputSize, inputSize).mul_(2 * stdv).add_(-stdv))
self.register_buffer('memory_v2', torch.rand(outputSize, inputSize).mul_(2 * stdv).add_(-stdv))
def forward(self, v1, v2, y, idx=None):
K = int(self.params[0].item())
T = self.params[1].item()
Z_v1 = self.params[2].item()
Z_v2 = self.params[3].item()
momentum = self.params[4].item()
batchSize = v1.size(0)
outputSize = self.memory_v1.size(0)
inputSize = self.memory_v1.size(1)
# original score computation
if idx is None:
idx = self.multinomial.draw(batchSize * (self.K + 1)).view(batchSize, -1)
idx.select(1, 0).copy_(y.data)
# sample
weight_v1 = torch.index_select(self.memory_v1, 0, idx.view(-1)).detach()
weight_v1 = weight_v1.view(batchSize, K + 1, inputSize)
out_v2 = torch.bmm(weight_v1, v2.view(batchSize, inputSize, 1))
out_v2 = torch.exp(torch.div(out_v2, T))
# sample
weight_v2 = torch.index_select(self.memory_v2, 0, idx.view(-1)).detach()
weight_v2 = weight_v2.view(batchSize, K + 1, inputSize)
out_v1 = torch.bmm(weight_v2, v1.view(batchSize, inputSize, 1))
out_v1 = torch.exp(torch.div(out_v1, T))
# set Z if haven't been set yet
if Z_v1 < 0:
self.params[2] = out_v1.mean() * outputSize
Z_v1 = self.params[2].clone().detach().item()
print("normalization constant Z_v1 is set to {:.1f}".format(Z_v1))
if Z_v2 < 0:
self.params[3] = out_v2.mean() * outputSize
Z_v2 = self.params[3].clone().detach().item()
print("normalization constant Z_v2 is set to {:.1f}".format(Z_v2))
# compute out_v1, out_v2
out_v1 = torch.div(out_v1, Z_v1).contiguous()
out_v2 = torch.div(out_v2, Z_v2).contiguous()
# update memory
with torch.no_grad():
l_pos = torch.index_select(self.memory_v1, 0, y.view(-1))
l_pos.mul_(momentum)
l_pos.add_(torch.mul(v1, 1 - momentum))
l_norm = l_pos.pow(2).sum(1, keepdim=True).pow(0.5)
updated_v1 = l_pos.div(l_norm)
self.memory_v1.index_copy_(0, y, updated_v1)
ab_pos = torch.index_select(self.memory_v2, 0, y.view(-1))
ab_pos.mul_(momentum)
ab_pos.add_(torch.mul(v2, 1 - momentum))
ab_norm = ab_pos.pow(2).sum(1, keepdim=True).pow(0.5)
updated_v2 = ab_pos.div(ab_norm)
self.memory_v2.index_copy_(0, y, updated_v2)
return out_v1, out_v2
class AliasMethod(object):
"""
From: https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
"""
def __init__(self, probs):
if probs.sum() > 1:
probs.div_(probs.sum())
K = len(probs)
self.prob = torch.zeros(K)
self.alias = torch.LongTensor([0]*K)
# Sort the data into the outcomes with probabilities
# that are larger and smaller than 1/K.
smaller = []
larger = []
for kk, prob in enumerate(probs):
self.prob[kk] = K*prob
if self.prob[kk] < 1.0:
smaller.append(kk)
else:
larger.append(kk)
# Loop though and create little binary mixtures that
# appropriately allocate the larger outcomes over the
# overall uniform mixture.
while len(smaller) > 0 and len(larger) > 0:
small = smaller.pop()
large = larger.pop()
self.alias[small] = large
self.prob[large] = (self.prob[large] - 1.0) + self.prob[small]
if self.prob[large] < 1.0:
smaller.append(large)
else:
larger.append(large)
for last_one in smaller+larger:
self.prob[last_one] = 1
def cuda(self):
self.prob = self.prob.cuda()
self.alias = self.alias.cuda()
def draw(self, N):
""" Draw N samples from multinomial """
K = self.alias.size(0)
kk = torch.zeros(N, dtype=torch.long, device=self.prob.device).random_(0, K)
prob = self.prob.index_select(0, kk)
alias = self.alias.index_select(0, kk)
# b is whether a random number is greater than q
b = torch.bernoulli(prob)
oq = kk.mul(b.long())
oj = alias.mul((1-b).long())
return oq + oj
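# Hedged usage sketch (added for illustration, not part of the original training code):
# AliasMethod draws indices from an arbitrary discrete distribution in O(1) per sample.
# The probabilities below are made up.
def _example_alias_method():
    sampler = AliasMethod(torch.tensor([0.5, 0.3, 0.2]))
    # sampler.cuda() would move the tables to the GPU, as ContrastMemory does in __init__
    return sampler.draw(10)  # 10 indices in {0, 1, 2}, distributed roughly like the given probabilities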
| 10,649 | 33.690554 | 120 |
py
|
CLIP2Scene
|
CLIP2Scene-main/pretrain/plyfile.py
|
# Copyright 2014 Darsh Ranjan
#
# This file is part of python-plyfile.
#
# python-plyfile is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# python-plyfile is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-plyfile. If not, see
# <http://www.gnu.org/licenses/>.
from itertools import islice as _islice
import numpy as _np
from sys import byteorder as _byteorder
try:
_range = xrange
except NameError:
_range = range
# Many-many relation
_data_type_relation = [
('int8', 'i1'),
('char', 'i1'),
('uint8', 'u1'),
('uchar', 'b1'),
('uchar', 'u1'),
('int16', 'i2'),
('short', 'i2'),
('uint16', 'u2'),
('ushort', 'u2'),
('int32', 'i4'),
('int', 'i4'),
('uint32', 'u4'),
('uint', 'u4'),
('float32', 'f4'),
('float', 'f4'),
('float64', 'f8'),
('double', 'f8')
]
_data_types = dict(_data_type_relation)
_data_type_reverse = dict((b, a) for (a, b) in _data_type_relation)
_types_list = []
_types_set = set()
for (_a, _b) in _data_type_relation:
if _a not in _types_set:
_types_list.append(_a)
_types_set.add(_a)
if _b not in _types_set:
_types_list.append(_b)
_types_set.add(_b)
_byte_order_map = {
'ascii': '=',
'binary_little_endian': '<',
'binary_big_endian': '>'
}
_byte_order_reverse = {
'<': 'binary_little_endian',
'>': 'binary_big_endian'
}
_native_byte_order = {'little': '<', 'big': '>'}[_byteorder]
def _lookup_type(type_str):
if type_str not in _data_type_reverse:
try:
type_str = _data_types[type_str]
except KeyError:
raise ValueError("field type %r not in %r" %
(type_str, _types_list))
return _data_type_reverse[type_str]
def _split_line(line, n):
fields = line.split(None, n)
if len(fields) == n:
fields.append('')
assert len(fields) == n + 1
return fields
def make2d(array, cols=None, dtype=None):
'''
Make a 2D array from an array of arrays. The `cols' and `dtype'
arguments can be omitted if the array is not empty.
'''
if (cols is None or dtype is None) and not len(array):
raise RuntimeError("cols and dtype must be specified for empty "
"array")
if cols is None:
cols = len(array[0])
if dtype is None:
dtype = array[0].dtype
return _np.fromiter(array, [('_', dtype, (cols,))],
count=len(array))['_']
class PlyParseError(Exception):
'''
Raised when a PLY file cannot be parsed.
The attributes `element', `row', `property', and `message' give
additional information.
'''
def __init__(self, message, element=None, row=None, prop=None):
self.message = message
self.element = element
self.row = row
self.prop = prop
s = ''
if self.element:
s += 'element %r: ' % self.element.name
if self.row is not None:
s += 'row %d: ' % self.row
if self.prop:
s += 'property %r: ' % self.prop.name
s += self.message
Exception.__init__(self, s)
    def __repr__(self):
        return ('PlyParseError(%r, element=%r, row=%r, prop=%r)' %
                (self.message, self.element, self.row, self.prop))
class PlyData(object):
'''
PLY file header and data.
A PlyData instance is created in one of two ways: by the static
method PlyData.read (to read a PLY file), or directly from __init__
given a sequence of elements (which can then be written to a PLY
file).
'''
def __init__(self, elements=[], text=False, byte_order='=',
comments=[], obj_info=[]):
'''
elements: sequence of PlyElement instances.
text: whether the resulting PLY file will be text (True) or
binary (False).
byte_order: '<' for little-endian, '>' for big-endian, or '='
for native. This is only relevant if `text' is False.
comments: sequence of strings that will be placed in the header
between the 'ply' and 'format ...' lines.
obj_info: like comments, but will be placed in the header with
"obj_info ..." instead of "comment ...".
'''
if byte_order == '=' and not text:
byte_order = _native_byte_order
self.byte_order = byte_order
self.text = text
self.comments = list(comments)
self.obj_info = list(obj_info)
self.elements = elements
def _get_elements(self):
return self._elements
def _set_elements(self, elements):
self._elements = tuple(elements)
self._index()
elements = property(_get_elements, _set_elements)
def _get_byte_order(self):
return self._byte_order
def _set_byte_order(self, byte_order):
if byte_order not in ['<', '>', '=']:
raise ValueError("byte order must be '<', '>', or '='")
self._byte_order = byte_order
byte_order = property(_get_byte_order, _set_byte_order)
def _index(self):
self._element_lookup = dict((elt.name, elt) for elt in
self._elements)
if len(self._element_lookup) != len(self._elements):
raise ValueError("two elements with same name")
@staticmethod
def _parse_header(stream):
'''
Parse a PLY header from a readable file-like stream.
'''
lines = []
comments = {'comment': [], 'obj_info': []}
while True:
line = stream.readline().decode('ascii').strip()
fields = _split_line(line, 1)
if fields[0] == 'end_header':
break
elif fields[0] in comments.keys():
lines.append(fields)
else:
lines.append(line.split())
a = 0
if lines[a] != ['ply']:
raise PlyParseError("expected 'ply'")
a += 1
while lines[a][0] in comments.keys():
comments[lines[a][0]].append(lines[a][1])
a += 1
if lines[a][0] != 'format':
raise PlyParseError("expected 'format'")
        if len(lines[a]) != 3:
            raise PlyParseError("wrong number of fields after 'format'")
        if lines[a][2] != '1.0':
            raise PlyParseError("expected version '1.0'")
fmt = lines[a][1]
if fmt not in _byte_order_map:
raise PlyParseError("don't understand format %r" % fmt)
byte_order = _byte_order_map[fmt]
text = fmt == 'ascii'
a += 1
while a < len(lines) and lines[a][0] in comments.keys():
comments[lines[a][0]].append(lines[a][1])
a += 1
return PlyData(PlyElement._parse_multi(lines[a:]),
text, byte_order,
comments['comment'], comments['obj_info'])
@staticmethod
def read(stream):
'''
Read PLY data from a readable file-like object or filename.
'''
(must_close, stream) = _open_stream(stream, 'read')
try:
data = PlyData._parse_header(stream)
for elt in data:
elt._read(stream, data.text, data.byte_order)
finally:
if must_close:
stream.close()
return data
def write(self, stream):
'''
Write PLY data to a writeable file-like object or filename.
'''
(must_close, stream) = _open_stream(stream, 'write')
try:
stream.write(self.header.encode('ascii'))
stream.write(b'\r\n')
for elt in self:
elt._write(stream, self.text, self.byte_order)
finally:
if must_close:
stream.close()
@property
def header(self):
'''
Provide PLY-formatted metadata for the instance.
'''
lines = ['ply']
if self.text:
lines.append('format ascii 1.0')
else:
lines.append('format ' +
_byte_order_reverse[self.byte_order] +
' 1.0')
# Some information is lost here, since all comments are placed
# between the 'format' line and the first element.
for c in self.comments:
lines.append('comment ' + c)
for c in self.obj_info:
lines.append('obj_info ' + c)
lines.extend(elt.header for elt in self.elements)
lines.append('end_header')
return '\r\n'.join(lines)
def __iter__(self):
return iter(self.elements)
def __len__(self):
return len(self.elements)
def __contains__(self, name):
return name in self._element_lookup
def __getitem__(self, name):
return self._element_lookup[name]
def __str__(self):
return self.header
def __repr__(self):
return ('PlyData(%r, text=%r, byte_order=%r, '
'comments=%r, obj_info=%r)' %
(self.elements, self.text, self.byte_order,
self.comments, self.obj_info))
def _open_stream(stream, read_or_write):
if hasattr(stream, read_or_write):
return (False, stream)
try:
return (True, open(stream, read_or_write[0] + 'b'))
except TypeError:
raise RuntimeError("expected open file or filename")
class PlyElement(object):
'''
PLY file element.
A client of this library doesn't normally need to instantiate this
directly, so the following is only for the sake of documenting the
internals.
Creating a PlyElement instance is generally done in one of two ways:
as a byproduct of PlyData.read (when reading a PLY file) and by
PlyElement.describe (before writing a PLY file).
'''
def __init__(self, name, properties, count, comments=[]):
'''
This is not part of the public interface. The preferred methods
of obtaining PlyElement instances are PlyData.read (to read from
a file) and PlyElement.describe (to construct from a numpy
array).
'''
self._name = str(name)
self._check_name()
self._count = count
self._properties = tuple(properties)
self._index()
self.comments = list(comments)
self._have_list = any(isinstance(p, PlyListProperty)
for p in self.properties)
@property
def count(self):
return self._count
def _get_data(self):
return self._data
def _set_data(self, data):
self._data = data
self._count = len(data)
self._check_sanity()
data = property(_get_data, _set_data)
def _check_sanity(self):
for prop in self.properties:
if prop.name not in self._data.dtype.fields:
raise ValueError("dangling property %r" % prop.name)
def _get_properties(self):
return self._properties
def _set_properties(self, properties):
self._properties = tuple(properties)
self._check_sanity()
self._index()
properties = property(_get_properties, _set_properties)
def _index(self):
self._property_lookup = dict((prop.name, prop)
for prop in self._properties)
if len(self._property_lookup) != len(self._properties):
raise ValueError("two properties with same name")
def ply_property(self, name):
return self._property_lookup[name]
@property
def name(self):
return self._name
def _check_name(self):
if any(c.isspace() for c in self._name):
msg = "element name %r contains spaces" % self._name
raise ValueError(msg)
def dtype(self, byte_order='='):
'''
Return the numpy dtype of the in-memory representation of the
data. (If there are no list properties, and the PLY format is
binary, then this also accurately describes the on-disk
representation of the element.)
'''
return [(prop.name, prop.dtype(byte_order))
for prop in self.properties]
@staticmethod
def _parse_multi(header_lines):
'''
Parse a list of PLY element definitions.
'''
elements = []
while header_lines:
(elt, header_lines) = PlyElement._parse_one(header_lines)
elements.append(elt)
return elements
@staticmethod
def _parse_one(lines):
'''
Consume one element definition. The unconsumed input is
returned along with a PlyElement instance.
'''
a = 0
line = lines[a]
if line[0] != 'element':
raise PlyParseError("expected 'element'")
if len(line) > 3:
raise PlyParseError("too many fields after 'element'")
if len(line) < 3:
raise PlyParseError("too few fields after 'element'")
(name, count) = (line[1], int(line[2]))
comments = []
properties = []
while True:
a += 1
if a >= len(lines):
break
if lines[a][0] == 'comment':
comments.append(lines[a][1])
elif lines[a][0] == 'property':
properties.append(PlyProperty._parse_one(lines[a]))
else:
break
return (PlyElement(name, properties, count, comments),
lines[a:])
@staticmethod
def describe(data, name, len_types={}, val_types={},
comments=[]):
'''
Construct a PlyElement from an array's metadata.
len_types and val_types can be given as mappings from list
property names to type strings (like 'u1', 'f4', etc., or
'int8', 'float32', etc.). These can be used to define the length
and value types of list properties. List property lengths
always default to type 'u1' (8-bit unsigned integer), and value
types default to 'i4' (32-bit integer).
'''
if not isinstance(data, _np.ndarray):
raise TypeError("only numpy arrays are supported")
if len(data.shape) != 1:
raise ValueError("only one-dimensional arrays are "
"supported")
count = len(data)
properties = []
descr = data.dtype.descr
for t in descr:
if not isinstance(t[1], str):
raise ValueError("nested records not supported")
if not t[0]:
raise ValueError("field with empty name")
if len(t) != 2 or t[1][1] == 'O':
# non-scalar field, which corresponds to a list
# property in PLY.
if t[1][1] == 'O':
if len(t) != 2:
raise ValueError("non-scalar object fields not "
"supported")
len_str = _data_type_reverse[len_types.get(t[0], 'u1')]
if t[1][1] == 'O':
val_type = val_types.get(t[0], 'i4')
val_str = _lookup_type(val_type)
else:
val_str = _lookup_type(t[1][1:])
prop = PlyListProperty(t[0], len_str, val_str)
else:
val_str = _lookup_type(t[1][1:])
prop = PlyProperty(t[0], val_str)
properties.append(prop)
elt = PlyElement(name, properties, count, comments)
elt.data = data
return elt
def _read(self, stream, text, byte_order):
'''
Read the actual data from a PLY file.
'''
if text:
self._read_txt(stream)
else:
if self._have_list:
# There are list properties, so a simple load is
# impossible.
self._read_bin(stream, byte_order)
else:
# There are no list properties, so loading the data is
# much more straightforward.
self._data = _np.fromfile(stream,
self.dtype(byte_order),
self.count)
if len(self._data) < self.count:
k = len(self._data)
del self._data
raise PlyParseError("early end-of-file", self, k)
self._check_sanity()
def _write(self, stream, text, byte_order):
'''
Write the data to a PLY file.
'''
if text:
self._write_txt(stream)
else:
if self._have_list:
# There are list properties, so serialization is
# slightly complicated.
self._write_bin(stream, byte_order)
else:
# no list properties, so serialization is
# straightforward.
self.data.astype(self.dtype(byte_order),
copy=False).tofile(stream)
def _read_txt(self, stream):
'''
Load a PLY element from an ASCII-format PLY file. The element
may contain list properties.
'''
self._data = _np.empty(self.count, dtype=self.dtype())
k = 0
for line in _islice(iter(stream.readline, b''), self.count):
fields = iter(line.strip().split())
for prop in self.properties:
try:
self._data[prop.name][k] = prop._from_fields(fields)
except StopIteration:
raise PlyParseError("early end-of-line",
self, k, prop)
except ValueError:
raise PlyParseError("malformed input",
self, k, prop)
try:
next(fields)
except StopIteration:
pass
else:
raise PlyParseError("expected end-of-line", self, k)
k += 1
if k < self.count:
del self._data
raise PlyParseError("early end-of-file", self, k)
def _write_txt(self, stream):
'''
Save a PLY element to an ASCII-format PLY file. The element may
contain list properties.
'''
for rec in self.data:
fields = []
for prop in self.properties:
fields.extend(prop._to_fields(rec[prop.name]))
_np.savetxt(stream, [fields], '%.18g', newline='\r\n')
def _read_bin(self, stream, byte_order):
'''
Load a PLY element from a binary PLY file. The element may
contain list properties.
'''
self._data = _np.empty(self.count, dtype=self.dtype(byte_order))
for k in _range(self.count):
for prop in self.properties:
try:
self._data[prop.name][k] = \
prop._read_bin(stream, byte_order)
except StopIteration:
raise PlyParseError("early end-of-file",
self, k, prop)
def _write_bin(self, stream, byte_order):
'''
Save a PLY element to a binary PLY file. The element may
contain list properties.
'''
for rec in self.data:
for prop in self.properties:
prop._write_bin(rec[prop.name], stream, byte_order)
@property
def header(self):
'''
Format this element's metadata as it would appear in a PLY
header.
'''
lines = ['element %s %d' % (self.name, self.count)]
# Some information is lost here, since all comments are placed
# between the 'element' line and the first property definition.
for c in self.comments:
lines.append('comment ' + c)
lines.extend(list(map(str, self.properties)))
return '\r\n'.join(lines)
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, value):
self.data[key] = value
def __str__(self):
return self.header
def __repr__(self):
return ('PlyElement(%r, %r, count=%d, comments=%r)' %
(self.name, self.properties, self.count,
self.comments))
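# Hedged usage sketch (added for illustration, not part of the original module): build an
# element from a one-dimensional numpy record array with PlyElement.describe, write it to
# a temporary file, and read it back. The file name is a made-up example.
def _example_plydata_roundtrip():
    import os
    import tempfile
    vertex = _np.array([(0.0, 0.0, 0.0), (1.0, 0.0, 1.0)],
                       dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
    el = PlyElement.describe(vertex, 'vertex')
    path = os.path.join(tempfile.gettempdir(), 'plyfile_example.ply')
    PlyData([el], text=True).write(path)
    return PlyData.read(path)['vertex'].data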
class PlyProperty(object):
'''
PLY property description. This class is pure metadata; the data
itself is contained in PlyElement instances.
'''
def __init__(self, name, val_dtype):
self._name = str(name)
self._check_name()
self.val_dtype = val_dtype
def _get_val_dtype(self):
return self._val_dtype
def _set_val_dtype(self, val_dtype):
self._val_dtype = _data_types[_lookup_type(val_dtype)]
val_dtype = property(_get_val_dtype, _set_val_dtype)
@property
def name(self):
return self._name
def _check_name(self):
if any(c.isspace() for c in self._name):
msg = "Error: property name %r contains spaces" % self._name
raise RuntimeError(msg)
@staticmethod
def _parse_one(line):
assert line[0] == 'property'
if line[1] == 'list':
if len(line) > 5:
raise PlyParseError("too many fields after "
"'property list'")
if len(line) < 5:
raise PlyParseError("too few fields after "
"'property list'")
return PlyListProperty(line[4], line[2], line[3])
else:
if len(line) > 3:
raise PlyParseError("too many fields after "
"'property'")
if len(line) < 3:
raise PlyParseError("too few fields after "
"'property'")
return PlyProperty(line[2], line[1])
def dtype(self, byte_order='='):
'''
Return the numpy dtype description for this property (as a tuple
of strings).
'''
return byte_order + self.val_dtype
def _from_fields(self, fields):
'''
Parse from generator. Raise StopIteration if the property could
not be read.
'''
return _np.dtype(self.dtype()).type(next(fields))
def _to_fields(self, data):
'''
Return generator over one item.
'''
yield _np.dtype(self.dtype()).type(data)
def _read_bin(self, stream, byte_order):
'''
Read data from a binary stream. Raise StopIteration if the
property could not be read.
'''
try:
return _np.fromfile(stream, self.dtype(byte_order), 1)[0]
except IndexError:
raise StopIteration
def _write_bin(self, data, stream, byte_order):
'''
Write data to a binary stream.
'''
_np.dtype(self.dtype(byte_order)).type(data).tofile(stream)
def __str__(self):
val_str = _data_type_reverse[self.val_dtype]
return 'property %s %s' % (val_str, self.name)
def __repr__(self):
return 'PlyProperty(%r, %r)' % (self.name,
_lookup_type(self.val_dtype))
class PlyListProperty(PlyProperty):
'''
PLY list property description.
'''
def __init__(self, name, len_dtype, val_dtype):
PlyProperty.__init__(self, name, val_dtype)
self.len_dtype = len_dtype
def _get_len_dtype(self):
return self._len_dtype
def _set_len_dtype(self, len_dtype):
self._len_dtype = _data_types[_lookup_type(len_dtype)]
len_dtype = property(_get_len_dtype, _set_len_dtype)
def dtype(self, byte_order='='):
'''
List properties always have a numpy dtype of "object".
'''
return '|O'
def list_dtype(self, byte_order='='):
'''
Return the pair (len_dtype, val_dtype) (both numpy-friendly
strings).
'''
return (byte_order + self.len_dtype,
byte_order + self.val_dtype)
def _from_fields(self, fields):
(len_t, val_t) = self.list_dtype()
n = int(_np.dtype(len_t).type(next(fields)))
data = _np.loadtxt(list(_islice(fields, n)), val_t, ndmin=1)
if len(data) < n:
raise StopIteration
return data
def _to_fields(self, data):
'''
Return generator over the (numerical) PLY representation of the
list data (length followed by actual data).
'''
(len_t, val_t) = self.list_dtype()
data = _np.asarray(data, dtype=val_t).ravel()
yield _np.dtype(len_t).type(data.size)
for x in data:
yield x
def _read_bin(self, stream, byte_order):
(len_t, val_t) = self.list_dtype(byte_order)
try:
n = _np.fromfile(stream, len_t, 1)[0]
except IndexError:
raise StopIteration
data = _np.fromfile(stream, val_t, n)
if len(data) < n:
raise StopIteration
return data
def _write_bin(self, data, stream, byte_order):
'''
Write data to a binary stream.
'''
(len_t, val_t) = self.list_dtype(byte_order)
data = _np.asarray(data, dtype=val_t).ravel()
_np.array(data.size, dtype=len_t).tofile(stream)
data.tofile(stream)
def __str__(self):
len_str = _data_type_reverse[self.len_dtype]
val_str = _data_type_reverse[self.val_dtype]
return 'property list %s %s %s' % (len_str, val_str, self.name)
def __repr__(self):
return ('PlyListProperty(%r, %r, %r)' %
(self.name,
_lookup_type(self.len_dtype),
_lookup_type(self.val_dtype)))
| 26,329 | 27.744541 | 72 |
py
|
CLIP2Scene
|
CLIP2Scene-main/pretrain/dataloader_kitti.py
|
import os
import re
import torch
import numpy as np
from torch.utils.data import Dataset
# from MinkowskiEngine.utils import sparse_quantize
from utils.transforms import make_transforms_clouds
from torchsparse import SparseTensor
from torchsparse.utils.collate import sparse_collate_fn
from torchsparse.utils.quantize import sparse_quantize
import cv2
import copy
TRAIN_SET = {0, 1, 2, 3, 4, 5, 6, 7, 9, 10}
VALIDATION_SET = {8}
TEST_SET = {11, 12, 13, 14, 15, 16, 17, 18, 19, 20}
def kitti_collate_pair_fn(list_data):
"""
Collate function adapted for creating batches with MinkowskiEngine.
"""
(
coords,
feats,
images,
pairing_points,
pairing_images,
inverse_indexes,
) = list(zip(*list_data))
batch_n_points, batch_n_pairings = [], []
offset = 0
for batch_id in range(len(coords)):
# Move batchids to the beginning
coords[batch_id][:, -1] = batch_id
pairing_points[batch_id][:] += offset
pairing_images[batch_id][:, 0] += batch_id * images[0].shape[0]
batch_n_points.append(coords[batch_id].shape[0])
batch_n_pairings.append(pairing_points[batch_id].shape[0])
offset += coords[batch_id].shape[0]
# Concatenate all lists
coords_batch = torch.cat(coords, 0).int()
# print(coords_batch.size())
pairing_points = torch.tensor(np.concatenate(pairing_points))
pairing_images = torch.tensor(np.concatenate(pairing_images))
feats_batch = torch.cat(feats, 0).float()
images_batch = torch.cat(images, 0).float()
return {
"sinput_C": coords_batch,
"sinput_F": feats_batch,
"input_I": images_batch,
"pairing_points": pairing_points,
"pairing_images": pairing_images,
"batch_n_pairings": batch_n_pairings,
"inverse_indexes": inverse_indexes,
}
class KittiMatchDataset(Dataset):
"""
Dataset returning a lidar scene and associated labels.
    Note that the superpixels functionality has been removed.
"""
def __init__(
self,
phase,
config,
shuffle=False,
cloud_transforms=None,
mixed_transforms=None,
**kwargs,
):
self.phase = phase
self.shuffle = shuffle
self.cloud_transforms = cloud_transforms
self.mixed_transforms = mixed_transforms
self.voxel_size = config["voxel_size"]
self.cylinder = config["cylindrical_coordinates"]
self.superpixels_type = config["superpixels_type"]
self.bilinear_decoder = config["decoder"] == "bilinear"
# a skip ratio can be used to reduce the dataset size
# and accelerate experiments
skip_ratio = config["dataset_skip_step"]
if phase in ("train", "parametrizing"):
phase_set = TRAIN_SET
elif phase in ("val", "verifying"):
phase_set = VALIDATION_SET
elif phase == "test":
phase_set = TEST_SET
self.list_files = []
for num in phase_set:
directory = next(
os.walk(
f"/mnt/lustre/share_data/liuyouquan/semantickitti/sequences/{num:0>2d}/velodyne"
)
)
self.list_files.extend(
map(
lambda x: f"/mnt/lustre/share_data/liuyouquan/semantickitti/sequences/"
f"{num:0>2d}/velodyne/" + x,
directory[2],
)
)
self.list_files = sorted(self.list_files)[::skip_ratio]
# labels' names lookup table
self.eval_labels = {
0: 0, 1: 0, 10: 1, 11: 2, 13: 5, 15: 3, 16: 5, 18: 4, 20: 5, 30: 6, 31: 7,
32: 8, 40: 9, 44: 10, 48: 11, 49: 12, 50: 13, 51: 14, 52: 0, 60: 9, 70: 15,
71: 16, 72: 17, 80: 18, 81: 19, 99: 0, 252: 1, 253: 7, 254: 6, 255: 8,
256: 5, 257: 5, 258: 4, 259: 5,
}
def select_points_in_frustum(self, points_2d, x1, y1, x2, y2):
"""
Select points in a 2D frustum parametrized by x1, y1, x2, y2 in image coordinates
:param points_2d: point cloud projected into 2D
:param points_3d: point cloud
:param x1: left bound
:param y1: upper bound
:param x2: right bound
:param y2: lower bound
:return: points (2D and 3D) that are in the frustum
"""
keep_ind = (points_2d[:, 0] > x1) * \
(points_2d[:, 1] > y1) * \
(points_2d[:, 0] < x2) * \
(points_2d[:, 1] < y2)
return keep_ind
def read_calib(self, calib_path):
"""
:param calib_path: Path to a calibration text file.
:return: dict with calibration matrices.
"""
calib_all = {}
with open(calib_path, 'r') as f:
for line in f.readlines():
if line == '\n':
break
key, value = line.split(':', 1)
calib_all[key] = np.array([float(x) for x in value.split()])
# reshape matrices
calib_out = {}
calib_out['P2'] = calib_all['P2'].reshape(3, 4) # 3x4 projection matrix for left camera
calib_out['Tr'] = np.identity(4) # 4x4 matrix
calib_out['Tr'][:3, :4] = calib_all['Tr'].reshape(3, 4)
return calib_out
def map_pointcloud_to_image(self, ann_info, min_dist: float = 1.0):
"""
Given a lidar token and camera sample_data token, load pointcloud and map it to
the image plane. Code adapted from nuscenes-devkit
https://github.com/nutonomy/nuscenes-devkit.
:param min_dist: Distance from the camera below which points are discarded.
"""
# pointsensor = self.nusc.get("sample_data", data["LIDAR_TOP"])
points = np.fromfile(ann_info, dtype=np.float32).reshape((-1, 4))
pc_ref = copy.deepcopy(points)
path_splits = ann_info.split('/')
calib_path = os.path.join("/mnt/lustre/share_data/liuyouquan/semantickitti/sequences",path_splits[-3], "calib.txt")
image_path = os.path.join("/mnt/lustre/share_data/chenrunnan/dataset/sequences/",path_splits[-3],"image_2", path_splits[-1].replace("bin", "png"))
image = cv2.imread(image_path)
image = cv2.resize(image, (1241, 376), interpolation=cv2.INTER_LINEAR)
calib = self.read_calib(calib_path)
proj_matrix = calib['P2'] @ calib['Tr']
proj_matrix = proj_matrix.astype(np.float32)
# project points into image
        keep_idx = points[:, 0] > 0  # only keep points in front of the vehicle
points_hcoords = np.concatenate([points[:, :3], np.ones([len(points), 1], dtype=np.float32)], axis=1)
img_points = (proj_matrix @ points_hcoords.T).T
matching_pixel = img_points[:, :2] / np.expand_dims(img_points[:, 2], axis=1) # scale 2D points
# print(img_points)
keep_idx_img_pts = self.select_points_in_frustum(matching_pixel, 0, 0, 1241, 376)
# print(keep_idx)
keep_idx = keep_idx_img_pts & keep_idx
# print(sum(keep_idx))
# print("+"*90)
matching_pixel = matching_pixel[keep_idx]
# cv2.namedWindow('win', cv2.WINDOW_NORMAL)
# for i in range(len(matching_pixel)):
# cv2.circle(image, (int(matching_pixel[i][0]), int(matching_pixel[i][1])), 1, (255, 255, 0), -1)
# cv2.imwrite('./vis.png',image)
# points_h = points[keep_idx]
pairing_points = np.where(keep_idx==True)[0]
pairing_images = np.concatenate(
(
np.zeros((matching_pixel.shape[0], 1), dtype=np.int64),
matching_pixel,
),
axis=1,
)
assert pairing_images.shape[1] == 3
images = [image / 255]
return pc_ref, images, pairing_points, pairing_images
def __len__(self):
return len(self.list_files)
def __getitem__(self, idx):
lidar_file = self.list_files[idx]
(
pc,
images,
pairing_points,
pairing_images,
) = self.map_pointcloud_to_image(lidar_file)
# points = np.fromfile(lidar_file, dtype=np.float32).reshape((-1, 4))
# get the points (4th coordinate is the point intensity)
intensity = torch.tensor(pc[:, 3:] + 1.)
pc = torch.tensor(pc[:, :3])
# print("pairing_points size: ", pairing_points.shape)
# print("pairing_images size: ", pairing_images.shape)
# print("images size: ", images[0].shape)
# print("pc size: ", pc.shape)
# images size: (900, 1600, 3)
# pc size: torch.Size([34688, 3])
# pairing_points size: (22585,)
# pairing_images size: (22585, 3)
images = torch.tensor(np.array(images, dtype=np.float32).transpose(0, 3, 1, 2))
# apply the transforms (augmentation)
if self.cloud_transforms:
pc = self.cloud_transforms(pc)
if self.mixed_transforms:
(
pc,
intensity,
images,
pairing_points,
pairing_images,
) = self.mixed_transforms(
pc, intensity, images, pairing_points, pairing_images
)
if self.cylinder:
# Transform to cylinder coordinate and scale for voxel size
x, y, z = pc.T
rho = torch.sqrt(x ** 2 + y ** 2) / self.voxel_size
# corresponds to a split each 1°
phi = torch.atan2(y, x) * 180 / np.pi
z = z / self.voxel_size
coords_aug = torch.cat((rho[:, None], phi[:, None], z[:, None]), 1)
else:
coords_aug = pc / self.voxel_size
# Voxelization
# discrete_coords, indexes, inverse_indexes = sparse_quantize(
# coords_aug, return_index=True, return_inverse=True
# )
discrete_coords, indexes, inverse_indexes = sparse_quantize(coords_aug.numpy(),
return_index=True,
return_inverse=True)
discrete_coords, indexes, inverse_indexes = torch.from_numpy(discrete_coords), torch.from_numpy(indexes), torch.from_numpy(inverse_indexes)
# indexes here are the indexes of points kept after the voxelization
pairing_points = inverse_indexes[pairing_points]
unique_feats = intensity[indexes]
discrete_coords = torch.cat(
(
discrete_coords,
torch.zeros(discrete_coords.shape[0], 1, dtype=torch.int32),
),
1,
)
return (
discrete_coords,
unique_feats,
images,
pairing_points,
pairing_images,
inverse_indexes,
)
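# Hedged sketch (added for illustration, not part of the original dataloader): the projection
# in map_pointcloud_to_image composes the camera intrinsics P2 with the lidar-to-camera
# extrinsics Tr, then divides by depth. All matrices and the point below are made up.
def _example_kitti_projection():
    P2 = np.array([[700., 0., 600., 0.],
                   [0., 700., 180., 0.],
                   [0., 0., 1., 0.]])                # toy 3x4 camera projection matrix
    Tr = np.array([[0., -1., 0., 0.],                # toy lidar->camera extrinsics:
                   [0., 0., -1., 0.],                # camera x = -lidar y, camera y = -lidar z,
                   [1., 0., 0., 0.],                 # camera z (depth) = lidar x (forward)
                   [0., 0., 0., 1.]])
    proj_matrix = (P2 @ Tr).astype(np.float32)
    points_h = np.array([[10., 0., 0., 1.]], dtype=np.float32)  # one lidar point 10 m ahead
    img_points = (proj_matrix @ points_h.T).T
    return img_points[:, :2] / img_points[:, 2:3]    # perspective division -> [[600., 180.]]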
| 10,972 | 34.282958 | 154 |
py
|
CLIP2Scene
|
CLIP2Scene-main/downstream/dataloader_scannet.py
|
import os
import copy
import torch
import numpy as np
from PIL import Image
import MinkowskiEngine as ME
from torch.utils.data import Dataset
# import pc_utils
from plyfile import PlyData, PlyElement
import math
# from pc_utils import write_ply_rgb
import sys
sys.path.append("..")
# from MinkowskiEngine.utils import sparse_quantize
import imageio
import cv2
import random
def write_ply_rgb(points, colors, filename, text=True):
""" input: Nx3, Nx3 write points and colors to filename as PLY format. """
num_points = len(points)
assert len(colors) == num_points
points = [(points[i, 0], points[i, 1], points[i, 2]) for i in range(points.shape[0])]
colors = [(colors[i, 0], colors[i, 1], colors[i, 2]) for i in range(colors.shape[0])]
vertex = np.array(points, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
color = np.array(colors, dtype=[('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])
vertex_all = np.empty(num_points, vertex.dtype.descr + color.dtype.descr)
for prop in vertex.dtype.names:
vertex_all[prop] = vertex[prop]
for prop in color.dtype.names:
vertex_all[prop] = color[prop]
el = PlyElement.describe(vertex_all, 'vertex', comments=['vertices'])
PlyData([el], text=text).write(filename)
def scannet_collate_pair_fn(batch):
(
pc,
coords,
feats,
unique_labels,
labels,
inverse_indexes,
scan_names,
) = list(zip(*batch))
len_batch = []
for batch_id, coo in enumerate(coords):
N = coords[batch_id].shape[0]
len_batch.append(N)
coords = ME.utils.batched_coordinates(coords, dtype=torch.float32)
feats = torch.cat(feats, dim=0)
# imgs = torch.cat(imgs, dim=0)
unique_labels = torch.cat(unique_labels, 0).long()
return {
"pc": pc, # point cloud
"sinput_C": coords, # discrete coordinates (ME)
"sinput_F": feats, # point features (N, 3)
# "input_I": imgs,
"len_batch": len_batch,
"labels": unique_labels,
"evaluation_labels": labels, # labels for each point
"inverse_indexes": inverse_indexes, # labels for each point
"lidar_name": scan_names
}
class scannet_Dataset(Dataset):
def __init__(self, phase, config, transforms=None):
self.scannet_root_dir = config['dataRoot_scannet']
if phase == 'train':
self.scannet_file_list = self.read_files(config['train_file'])
skip_ratio = config["dataset_skip_step"]
print("before: ", len(self.scannet_file_list))
self.scannet_file_list = sorted(self.scannet_file_list)[::skip_ratio]
print("after: ", len(self.scannet_file_list))
else:
self.scannet_file_list = self.read_files(config['val_file'])
self.voxel_size = config['voxel_size']
self.phase = phase
self.config = config
self.imageDim = (640, 480)
self.transforms = transforms
self.maxImages = 8
def read_files(self, file):
f = open(file)
lines = f.readlines()
name_list = [line.split('.')[0] for line in lines]
f.close()
return name_list
def __len__(self):
return len(self.scannet_file_list)
def read_pose_file(self, fname):
posemat = np.asarray([[float(x[0]), float(x[1]), float(x[2]), float(x[3])] for x in
(x.split(" ") for x in open(fname).read().splitlines())])
return posemat
def read_intrinsic_file(self, fname):
intrinsic = np.asarray([[float(x[0]), float(x[1]), float(x[2]), float(x[3])] for x in
(x.split(" ") for x in open(fname).read().splitlines())])
return intrinsic
def read_txt(self, path):
# Read txt file into lines.
with open(path) as f:
lines = f.readlines()
lines = [x.strip() for x in lines]
return lines
def computeLinking(self, camera_to_world, coords, depth, link_proj_threshold, intrinsic_color, intrinsic_depth, imageDim):
"""
:param camera_to_world: 4 x 4
:param coords: N x 3 format
:param depth: H x W format
:intrinsic_depth: 4 x 4
:intrinsic_color: 4 x 4, not used currently
:return: linking, N x 3 format, (H,W,mask)
"""
# print("imageDim ", imageDim)
intrinsic = intrinsic_depth
link = np.zeros((3, coords.shape[0]), dtype=float)
coordsNew = np.concatenate([coords, np.ones([coords.shape[0], 1])], axis=1).T #4 x N
assert coordsNew.shape[0] == 4, "[!] Shape error"
world_to_camera = np.linalg.inv(camera_to_world) # 4 x 4
p = np.matmul(world_to_camera, coordsNew) # 4 x N
p[0] = (p[0] * intrinsic[0][0]) / p[2] + intrinsic[0][2]
p[1] = (p[1] * intrinsic[1][1]) / p[2] + intrinsic[1][2]
pi = p
inside_mask = (pi[0] >= 0) * (pi[1] >= 0) * (pi[0] <= imageDim[1] - 1) * (pi[1] <= imageDim[0]-1)
        occlusion_mask = np.abs(depth[np.round(pi[1][inside_mask]).astype(int), np.round(pi[0][inside_mask]).astype(int)] - p[2][inside_mask]) <= link_proj_threshold
inside_mask[inside_mask == True] = occlusion_mask
link[0][inside_mask] = pi[1][inside_mask]
link[1][inside_mask] = pi[0][inside_mask]
link[2][inside_mask] = 1
return link.T
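    # Worked example for the projection in computeLinking (illustrative intrinsics,
    # not taken from any scene): with fx = fy = 500, cx = 320, cy = 240, a
    # camera-frame point (X, Y, Z) = (0.5, 0.25, 2.0) maps to
    # u = 0.5 * 500 / 2 + 320 = 445 and v = 0.25 * 500 / 2 + 240 = 302.5; the point
    # is linked only if (u, v) falls inside the image and its depth agrees with the
    # depth map within link_proj_threshold.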
def __getitem__(self, idx):
# _new_semantic.npy: 0~19, .npy: 1~20
path = os.path.join(self.scannet_root_dir, self.scannet_file_list[idx], self.scannet_file_list[idx]+"_new_semantic.npy")
# path = os.path.join(self.scannet_root_dir, self.file_list[idx], self.file_list[idx]+".npy")
data = torch.from_numpy(np.load(path))
coords, feats, labels = data[:, :3], data[:, 3: 6], data[:, -1]
labels[labels == -100] = -1
labels += 1
pc = coords.clone()
# coords, labels = data[:, :3], data[:, 9:]
# sceneName = self.scannet_file_list[idx]
# write_ply_rgb(coords, feats, "visual/visual_%s.ply" % sceneName)
feats = feats / 127.5 - 1
coords = (coords - coords.mean(0)) / self.voxel_size
# print(feats)
# feats = torch.ones(len(coords), 1)
# frame_names = []
# imgs = []
# links = []
#
# intrinsic_color = self.read_intrinsic_file(os.path.join(self.config['dataRoot_images'], sceneName, 'intrinsics_color.txt'))
# intrinsic_depth = self.read_intrinsic_file(os.path.join(self.config['dataRoot_images'], sceneName, 'intrinsics_depth.txt'))
#
# for framename in os.listdir(os.path.join(self.config['dataRoot_images'], sceneName, 'color')):
# frame_names.append(framename.split('.')[0])
#
# pairing_points = []
# pairing_images = []
#
# frame_names = random.sample(frame_names, min(self.maxImages, len(frame_names)))
#
# for i, frameid in enumerate(frame_names):
# f = os.path.join(self.config['dataRoot_images'], sceneName, 'color', frameid + '.jpg')
# img = imageio.imread(f) / 255
# # print("before ", img.shape)
# img = cv2.resize(img, self.imageDim)
# # print("after ", img.shape)
# # images.append(im / 255)
# depth = imageio.imread(f.replace('color', 'depth').replace('.jpg', '.png')) / 1000.0 # convert to meter
# posePath = f.replace('color', 'pose').replace('.jpg', '.txt')
# pose = self.read_pose_file(posePath)
#
# # ply_filename = os.path.join('%s_vh_clean_2.ply' % (sceneName))
# # label_filename = os.path.join('%s_vh_clean_2.labels.ply' % (sceneName))
#
# # print("depth", depth.shape)
# # print("img", img.shape)
#
# # link = np.ones([coords.shape[0], 3])
# link = self.computeLinking(pose, coords, depth, 0.05, intrinsic_color, intrinsic_depth, depth.shape)
#
# pairing_point = torch.from_numpy(np.argwhere(link[:, 2] == 1)).squeeze()
# pairing_points.append(pairing_point)
#
# link = torch.from_numpy(link).int()
# # link_index = link[:, 2] == 1
#
# imgs.append(torch.from_numpy(img.transpose((2, 0, 1))))
#
# pairing_image = link[pairing_point, :2]
# pairing_images.append(torch.cat((torch.ones(pairing_point.shape[0], 1) * i,
# pairing_image), dim=1))
'''
# print image-point correspondence
img_pixel = tuple(pairing_image.T.long())
img_RGB = img[img_pixel]
print(coords[pairing_point].shape, "img_RGB ", img_RGB.shape)
write_ply_rgb(coords[pairing_point], img_RGB*255, "visual/visual_%s_%s.ply" % (frameid, i))
'''
# imgs = torch.stack(imgs)
# pairing_points = torch.cat(pairing_points, dim=0)
# pairing_images = torch.cat(pairing_images, dim=0)
if self.transforms:
coords = self.transforms(coords.float())
discrete_coords, indexes, inverse_indexes = ME.utils.sparse_quantize(
coords.contiguous(), return_index=True, return_inverse=True
)
# indexes here are the indexes of points kept after the voxelization
# pairing_points = inverse_indexes[pairing_points]
unique_labels = labels[indexes]
feats = feats[indexes]
# assert pairing_points.shape[0] == pairing_images.shape[0]
packages = (pc, discrete_coords, feats, unique_labels, labels, inverse_indexes, self.scannet_file_list[idx])
return packages
def make_data_loader(config, phase, num_threads=0):
"""
Create the data loader for a given phase and a number of threads.
This function is not used with pytorch lightning, but is used when evaluating.
"""
# select the desired transformations
if phase == "train":
transforms = make_transforms_clouds(config)
else:
transforms = None
# instantiate the dataset
dset = scannet_Dataset(phase=phase, transforms=transforms, config=config)
collate_fn = scannet_collate_pair_fn
batch_size = config["batch_size"] // config["num_gpus"]
# create the loader
loader = torch.utils.data.DataLoader(
dset,
batch_size=batch_size,
shuffle=phase == "train",
num_workers=num_threads,
collate_fn=collate_fn,
pin_memory=False,
drop_last=phase == "train",
worker_init_fn=lambda id: np.random.seed(torch.initial_seed() // 2 ** 32 + id),
)
return loader
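# Usage sketch (values are placeholders; the real config comes from the YAML files
# shipped with the repository):
#   config = {"dataRoot_scannet": "<path>", "val_file": "<split.txt>",
#             "voxel_size": 0.05, "batch_size": 8, "num_gpus": 1, ...}
#   loader = make_data_loader(config, phase="val")
#   batch = next(iter(loader))  # dict with "sinput_C", "sinput_F", "labels", ...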
| 10,704 | 35.660959 | 171 |
py
|
CLIP2Scene
|
CLIP2Scene-main/downstream/lightning_datamodule.py
|
import torch
import numpy as np
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from utils.transforms import make_transforms_clouds
from downstream.dataloader_kitti import SemanticKITTIDataset, custom_collate_fn as kitti_collate_pair_fn
from downstream.dataloader_nuscenes import NuScenesDataset, custom_collate_fn as minkunet_collate_pair_fn
from downstream.dataloader_scannet import scannet_Dataset, scannet_collate_pair_fn
class DownstreamDataModule(pl.LightningDataModule):
"""
The equivalent of a DataLoader for pytorch lightning.
"""
def __init__(self, config):
super().__init__()
self.config = config
        # in multi-GPU training this is the per-GPU batch size
self.batch_size = config["batch_size"] // config["num_gpus"]
# the CPU workers are split across GPU
self.num_workers = max(config["num_threads"] // config["num_gpus"], 1)
def setup(self, stage):
# setup the dataloader: this function is automatically called by lightning
transforms = make_transforms_clouds(self.config)
if self.config["dataset"].lower() == "nuscenes":
Dataset = NuScenesDataset
elif self.config["dataset"].lower() == "scannet":
Dataset = scannet_Dataset
elif self.config["dataset"].lower() in ("kitti", "semantickitti"):
Dataset = SemanticKITTIDataset
else:
raise Exception(f"Unknown dataset {self.config['dataset']}")
if self.config["training"] in ("parametrize", "parametrizing"):
phase_train = "parametrizing"
phase_val = "verifying"
else:
phase_train = "train"
phase_val = "val"
self.train_dataset = Dataset(
phase=phase_train, transforms=transforms, config=self.config
)
if Dataset == NuScenesDataset:
self.val_dataset = Dataset(
phase=phase_val,
config=self.config,
cached_nuscenes=self.train_dataset.nusc,
)
else:
self.val_dataset = Dataset(phase=phase_val, config=self.config)
def train_dataloader(self):
if self.config["num_gpus"]:
num_workers = self.config["num_threads"] // self.config["num_gpus"]
else:
num_workers = self.config["num_threads"]
if self.config["dataset"].lower() == "nuscenes":
default_collate_pair_fn = minkunet_collate_pair_fn
elif self.config["dataset"].lower() == "kitti":
default_collate_pair_fn = kitti_collate_pair_fn
elif self.config["dataset"].lower() == "scannet":
default_collate_pair_fn = scannet_collate_pair_fn
return DataLoader(
self.train_dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=num_workers,
collate_fn=default_collate_pair_fn,
pin_memory=True,
drop_last=True,
worker_init_fn=lambda id: np.random.seed(
torch.initial_seed() // 2 ** 32 + id
),
)
def val_dataloader(self):
if self.config["num_gpus"]:
num_workers = self.config["num_threads"] // self.config["num_gpus"]
else:
num_workers = self.config["num_threads"]
if self.config["dataset"].lower() == "nuscenes":
default_collate_pair_fn = minkunet_collate_pair_fn
elif self.config["dataset"].lower() == "kitti":
default_collate_pair_fn = kitti_collate_pair_fn
elif self.config["dataset"].lower() == "scannet":
default_collate_pair_fn = scannet_collate_pair_fn
return DataLoader(
self.val_dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=num_workers,
collate_fn=default_collate_pair_fn,
pin_memory=True,
drop_last=False,
worker_init_fn=lambda id: np.random.seed(
torch.initial_seed() // 2 ** 32 + id
),
)
#
# def train_dataloader(self):
# # construct the training dataloader: this function is automatically called
# # by lightning
# return DataLoader(
# self.train_dataset,
# batch_size=self.batch_size,
# shuffle=True,
# num_workers=self.num_workers,
# collate_fn=custom_collate_fn,
# pin_memory=True,
# drop_last=False,
# worker_init_fn=lambda id: np.random.seed(
# torch.initial_seed() // 2 ** 32 + id
# ),
# )
#
# def val_dataloader(self):
# # construct the validation dataloader: this function is automatically called
# # by lightning
# return DataLoader(
# self.val_dataset,
# batch_size=self.batch_size,
# shuffle=False,
# num_workers=self.num_workers,
# collate_fn=custom_collate_fn,
# pin_memory=True,
# drop_last=False,
# worker_init_fn=lambda id: np.random.seed(
# torch.initial_seed() // 2 ** 32 + id
# ),
# )
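# Typical wiring sketch (illustrative only; the actual training entry point lives
# elsewhere in the repository):
#   dm = DownstreamDataModule(config)
#   trainer = pl.Trainer(gpus=config["num_gpus"], max_epochs=config["num_epochs"])
#   trainer.fit(lightning_module, dm)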
| 5,158 | 35.85 | 86 |
py
|
CLIP2Scene
|
CLIP2Scene-main/downstream/evaluate.py
|
import numpy as np
import torch
from tqdm import tqdm
from copy import deepcopy
from MinkowskiEngine import SparseTensor
# from torchsparse import SparseTensor
from utils.metrics import compute_IoU
CLASSES_NUSCENES = [
"barrier",
"bicycle",
"bus",
"car",
"construction_vehicle",
"motorcycle",
"pedestrian",
"traffic_cone",
"trailer",
"truck",
"driveable_surface",
"other_flat",
"sidewalk",
"terrain",
"manmade",
"vegetation",
]
CLASSES_KITTI = [
"car",
"bicycle",
"motorcycle",
"truck",
"other-vehicle",
"person",
"bicyclist",
"motorcyclist",
"road",
"parking",
"sidewalk",
"other-ground",
"building",
"fence",
"vegetation",
"trunk",
"terrain",
"pole",
"traffic-sign",
]
CLASSES_scannet = [
'wall',
'floor',
'cabinet',
'bed',
'chair',
'sofa',
'table',
'door',
'window',
'bookshelf',
'picture',
'counter',
'desk',
'curtain',
'refrigerator',
'shower curtain',
'toilet',
'sink',
'bathtub',
'other furniture'
]
def evaluate(model, dataloader, config):
"""
Function to evaluate the performances of a downstream training.
It prints the per-class IoU, mIoU and fwIoU.
"""
model.eval()
with torch.no_grad():
i = 0
full_predictions = []
ground_truth = []
for batch in tqdm(dataloader):
lidar_names = batch["lidar_name"]
sparse_input = SparseTensor(batch["sinput_F"].float(), batch["sinput_C"].int(), device=0)
# print(sparse_input, model)
output_points = model(sparse_input)
# for spvcnn
# sparse_input = SparseTensor(batch["sinput_F"], batch["sinput_C"])
# output_points = model(sparse_input.to(0))
if config["ignore_index"]:
output_points[:, config["ignore_index"]] = -1e6
torch.cuda.empty_cache()
preds = output_points.argmax(1).cpu()
offset = 0
# print(output_points)
# print(batch["evaluation_labels"][0].max())
# print(batch["evaluation_labels"][0].min())
for j, lb in enumerate(batch["len_batch"]):
# print(batch["len_batch"], j)
inverse_indexes = batch["inverse_indexes"][j]
predictions = preds[inverse_indexes + offset]
# print(predictions.shape, batch["evaluation_labels"][j].shape)
# remove the ignored index entirely
full_predictions.append(predictions)
ground_truth.append(deepcopy(batch["evaluation_labels"][j]))
offset += lb
# m_IoU, fw_IoU, per_class_IoU = compute_IoU(
# torch.cat([predictions]),
# torch.cat([deepcopy(batch["evaluation_labels"][j])]),
# config["model_n_out"],
# ignore_index=0,
# )
'''
class_ind = 4
lidar_name = lidar_names[j].split('/')[-1]
root_path = '/mnt/lustre/chenrunnan/projects/SLidR/visual/annotation_free/'
# lidar_name_path = root_path + str(per_class_IoU[class_ind]) + lidar_name
lidar_name_path = root_path + lidar_name
save_file = predictions.unsqueeze(-1).numpy()
# save_file = np.expand_dims(predictions)
# if per_class_IoU[class_ind] != 1 and per_class_IoU[class_ind] > 0.4:
np.array(save_file).astype(np.uint8).tofile(lidar_name_path)
'''
# import pdb
# pdb.set_trace()
i += j
full_predictions = torch.cat(full_predictions).int()
ground_truth = torch.cat(ground_truth).int()
# if config["dataset"].lower() == "scannet":
# ground_truth += 1
# ground_truth[ground_truth == -99] = 0
# print(full_predictions.shape, torch.cat(ground_truth).shape)
# print(torch.cat(full_predictions), torch.cat(ground_truth))
print(ground_truth)
m_IoU, fw_IoU, per_class_IoU = compute_IoU(
full_predictions,
ground_truth,
config["model_n_out"],
ignore_index=0,
)
# import pdb
# pdb.set_trace()
print("Per class IoU:")
if config["dataset"].lower() == "nuscenes":
print(
*[
f"{a:20} - {b:.3f}"
for a, b in zip(CLASSES_NUSCENES, (per_class_IoU).numpy())
],
sep="\n",
)
elif config["dataset"].lower() == "kitti":
print(
*[
f"{a:20} - {b:.3f}"
for a, b in zip(CLASSES_KITTI, (per_class_IoU).numpy())
],
sep="\n",
)
elif config["dataset"].lower() == "scannet":
print(
*[
f"{a:20} - {b:.3f}"
for a, b in zip(CLASSES_scannet, (per_class_IoU).numpy())
],
sep="\n",
)
print()
print(f"mIoU: {m_IoU}")
print(f"fwIoU: {fw_IoU}")
return m_IoU
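# Usage sketch (assumes a trained segmentation model and the evaluation-time loader
# built by the matching make_data_loader; names are illustrative):
#   loader = make_data_loader(config, phase="val")
#   model = model.eval().cuda()
#   m_iou = evaluate(model, loader, config)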
| 5,359 | 26.628866 | 101 |
py
|
CLIP2Scene
|
CLIP2Scene-main/downstream/lightning_trainer.py
|
import os
import torch
import torch.optim as optim
import pytorch_lightning as pl
from MinkowskiEngine import SparseTensor
# from torchsparse import SparseTensor
from downstream.criterion import DownstreamLoss, unknown_aware_infoNCE
from pytorch_lightning.utilities import rank_zero_only
from utils.metrics import confusion_matrix, compute_IoU_from_cmatrix
import MinkowskiEngine as ME
class LightningDownstream(pl.LightningModule):
def __init__(self, model, config):
super().__init__()
self.model = model
self.best_mIoU = 0.0
self.metrics = {"val mIoU": [], "val_loss": [], "train_loss": []}
self._config = config
self.train_losses = []
self.val_losses = []
self.ignore_index = config["ignore_index"]
self.n_classes = config["model_n_out"]
self.num_epochs = config["num_epochs"]
self.epoch = 0
if config["loss"].lower() == "lovasz":
self.criterion = DownstreamLoss(
ignore_index=config["ignore_index"],
device=self.device,
)
else:
self.criterion = torch.nn.CrossEntropyLoss(
ignore_index=config["ignore_index"],
)
self.mode = config["mode"]
# if self.mode == 'source_free':
# self.num_epochs = 0
if self.mode == 'zero_shot':
self.criterion = unknown_aware_infoNCE(ignore_index=config["ignore_index"], config=config)
self.working_dir = os.path.join(config["working_dir"], config["datetime"])
if os.environ.get("LOCAL_RANK", 0) == 0:
os.makedirs(self.working_dir, exist_ok=True)
def configure_optimizers(self):
if self._config.get("lr_head", None) is not None:
print("Use different learning rates between the head and trunk.")
def is_final_head(key):
return key.find('final.') != -1
param_group_head = [
param for key, param in self.model.named_parameters()
if param.requires_grad and is_final_head(key)]
param_group_trunk = [
param for key, param in self.model.named_parameters()
if param.requires_grad and (not is_final_head(key))]
param_group_all = [
param for key, param in self.model.named_parameters()
if param.requires_grad]
assert len(param_group_all) == (len(param_group_head) + len(param_group_trunk))
weight_decay = self._config["weight_decay"]
weight_decay_head = self._config["weight_decay_head"] if (self._config["weight_decay_head"] is not None) else weight_decay
parameters = [
{"params": iter(param_group_head), "lr": self._config["lr_head"], "weight_decay": weight_decay_head},
{"params": iter(param_group_trunk)}]
print(f"==> Head: #{len(param_group_head)} params with learning rate: {self._config['lr_head']} and weight_decay: {weight_decay_head}")
print(f"==> Trunk: #{len(param_group_trunk)} params with learning rate: {self._config['lr']} and weight_decay: {weight_decay}")
optimizer = optim.SGD(
parameters,
lr=self._config["lr"],
momentum=self._config["sgd_momentum"],
dampening=self._config["sgd_dampening"],
weight_decay=self._config["weight_decay"],
)
else:
if self._config.get("optimizer") and self._config["optimizer"] == 'adam':
print('Optimizer: AdamW')
optimizer = optim.AdamW(
self.model.parameters(),
lr=self._config["lr"],
weight_decay=self._config["weight_decay"],
)
else:
print('Optimizer: SGD')
optimizer = optim.SGD(
self.model.parameters(),
lr=self._config["lr"],
momentum=self._config["sgd_momentum"],
dampening=self._config["sgd_dampening"],
weight_decay=self._config["weight_decay"],
)
if self._config.get("scheduler") and self._config["scheduler"] == 'steplr':
print('Scheduler: StepLR')
scheduler = torch.optim.lr_scheduler.StepLR(
optimizer, int(.9 * self._config["num_epochs"]),
)
else:
print('Scheduler: Cosine')
scheduler = optim.lr_scheduler.CosineAnnealingLR(
optimizer, self._config["num_epochs"]
)
return [optimizer], [scheduler]
def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx):
# set_to_none=True is a modest speed-up
optimizer.zero_grad(set_to_none=True)
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
if self._config["freeze_layers"]:
self.model.eval()
else:
self.model.train()
sparse_input = ME.SparseTensor(batch["sinput_F"].float(), coordinates=batch["sinput_C"].int())
# sparse_input = SparseTensor(batch["sinput_F"], batch["sinput_C"])
output_points = self.model(sparse_input)
# print(output_points.shape, batch["labels"].shape, "=================================")
loss = self.criterion(output_points, batch["labels"])
# if self.mode == 'source_free':
        # empty the cache to reduce the memory requirement: ME is known to slowly
        # fill the cache otherwise
torch.cuda.empty_cache()
self.log(
"train_loss", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True
)
self.train_losses.append(loss.detach().cpu())
return loss
def training_epoch_end(self, outputs):
self.epoch += 1
if self.epoch == self.num_epochs:
self.save()
def validation_step(self, batch, batch_idx):
# sparse_input = SparseTensor(batch["sinput_F"], batch["sinput_C"])
sparse_input = ME.SparseTensor(batch["sinput_F"].float(), coordinates=batch["sinput_C"].int())
output_points = self.model(sparse_input)
loss = self.criterion(output_points, batch["labels"])
self.val_losses.append(loss.detach().cpu())
self.log(
"val_loss", loss, on_epoch=True, prog_bar=True, logger=True, sync_dist=True
)
# Ensure we ignore the index 0
# (probably not necessary after some training)
output_points = output_points.softmax(1)
if self.ignore_index is not None:
output_points[:, self.ignore_index] = 0.0
preds = []
labels = []
offset = 0
output_points = output_points.argmax(1)
for i, lb in enumerate(batch["len_batch"]):
preds.append(output_points[batch["inverse_indexes"][i] + offset])
labels.append(batch["evaluation_labels"][i])
offset += lb
preds = torch.cat(preds, dim=0).int()
labels = torch.cat(labels, dim=0).int()
c_matrix = confusion_matrix(preds, labels, self.n_classes)
return loss, c_matrix
def validation_epoch_end(self, outputs):
c_matrix = sum([o[1] for o in outputs])
# remove the ignore_index from the confusion matrix
c_matrix = torch.sum(self.all_gather(c_matrix), 0)
m_IoU, fw_IoU, per_class_IoU = compute_IoU_from_cmatrix(
c_matrix, self.ignore_index
)
self.train_losses = []
self.val_losses = []
self.log("m_IoU", m_IoU, prog_bar=True, logger=True, sync_dist=False)
self.log("fw_IoU", fw_IoU, prog_bar=True, logger=True, sync_dist=False)
if self.epoch == self._config["num_epochs"]:
self.save()
@rank_zero_only
def save(self):
path = os.path.join(self.working_dir, "model.pt")
torch.save(
{"model_points": self.model.state_dict(), "config": self._config}, path
)
| 8,081 | 40.446154 | 148 |
py
|
CLIP2Scene
|
CLIP2Scene-main/downstream/dataloader_nuscenes.py
|
import os
import torch
import numpy as np
from torch.utils.data import Dataset
from nuscenes.nuscenes import NuScenes
# from MinkowskiEngine.utils import sparse_quantize
import MinkowskiEngine as ME
from utils.transforms import make_transforms_clouds
from nuscenes.utils.splits import create_splits_scenes
from nuscenes.utils.data_classes import LidarPointCloud
# from torchsparse.utils.quantize import sparse_quantize
# from petrel_client.client import Client
import json
# parametrizing set, to try out different parameters
CUSTOM_SPLIT = [
"scene-0008", "scene-0009", "scene-0019", "scene-0029", "scene-0032", "scene-0042",
"scene-0045", "scene-0049", "scene-0052", "scene-0054", "scene-0056", "scene-0066",
"scene-0067", "scene-0073", "scene-0131", "scene-0152", "scene-0166", "scene-0168",
"scene-0183", "scene-0190", "scene-0194", "scene-0208", "scene-0210", "scene-0211",
"scene-0241", "scene-0243", "scene-0248", "scene-0259", "scene-0260", "scene-0261",
"scene-0287", "scene-0292", "scene-0297", "scene-0305", "scene-0306", "scene-0350",
"scene-0352", "scene-0358", "scene-0361", "scene-0365", "scene-0368", "scene-0377",
"scene-0388", "scene-0391", "scene-0395", "scene-0413", "scene-0427", "scene-0428",
"scene-0438", "scene-0444", "scene-0452", "scene-0453", "scene-0459", "scene-0463",
"scene-0464", "scene-0475", "scene-0513", "scene-0533", "scene-0544", "scene-0575",
"scene-0587", "scene-0589", "scene-0642", "scene-0652", "scene-0658", "scene-0669",
"scene-0678", "scene-0687", "scene-0701", "scene-0703", "scene-0706", "scene-0710",
"scene-0715", "scene-0726", "scene-0735", "scene-0740", "scene-0758", "scene-0786",
"scene-0790", "scene-0804", "scene-0806", "scene-0847", "scene-0856", "scene-0868",
"scene-0882", "scene-0897", "scene-0899", "scene-0976", "scene-0996", "scene-1012",
"scene-1015", "scene-1016", "scene-1018", "scene-1020", "scene-1024", "scene-1044",
"scene-1058", "scene-1094", "scene-1098", "scene-1107",
]
def custom_collate_fn(list_data):
"""
Custom collate function adapted for creating batches with MinkowskiEngine.
"""
input = list(zip(*list_data))
# whether the dataset returns labels
labelized = len(input) == 7
# evaluation_labels are per points, labels are per voxels
if labelized:
xyz, coords, feats, labels, evaluation_labels, inverse_indexes, lidar_name = input
else:
xyz, coords, feats, inverse_indexes = input
# for names
# name_list = []
# print(feats[0].size())
coords_batch, len_batch = [], []
# create a tensor of coordinates of the 3D points
    # note that in ME, batch index and point indexes are collated in the same dimension
for batch_id, coo in enumerate(coords):
N = coords[batch_id].shape[0]
coords_batch.append(
torch.cat((coo, torch.ones(N, 1, dtype=torch.int32) * batch_id), 1)
)
len_batch.append(N)
# for batch_id, coo in enumerate(coords):
# N = coords[batch_id].shape[0]
# coords_batch.append(
# torch.cat((torch.ones(N, 1, dtype=torch.int32) * batch_id, coo), 1)
# )
# len_batch.append(N)
# Collate all lists on their first dimension
coords_batch = torch.cat(coords_batch, 0).int()
feats_batch = torch.cat(feats, 0).float()
if labelized:
labels_batch = torch.cat(labels, 0).long()
return {
"pc": xyz, # point cloud
"sinput_C": coords_batch, # discrete coordinates (ME)
"sinput_F": feats_batch, # point features (N, 3)
"len_batch": len_batch, # length of each batch
"labels": labels_batch, # labels for each (voxelized) point
"evaluation_labels": evaluation_labels, # labels for each point
"inverse_indexes": inverse_indexes, # labels for each point
"lidar_name": lidar_name
}
else:
return {
"pc": xyz,
"sinput_C": coords_batch,
"sinput_F": feats_batch,
"len_batch": len_batch,
"inverse_indexes": inverse_indexes,
}
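# Layout note for the collate above (descriptive, added for clarity): here each
# scan's integer coordinates get their batch index appended as a fourth column, so
# "sinput_C" is (sum_i N_i, 4) laid out as [x, y, z, batch_idx] (the commented-out
# variant above shows the batch-index-first alternative); "len_batch" keeps every
# N_i so predictions can be split back per scan.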
class NuScenesDataset(Dataset):
"""
Dataset returning a lidar scene and associated labels.
"""
def __init__(self, phase, config, transforms=None, cached_nuscenes=None):
self.phase = phase
self.labels = self.phase != "test"
self.transforms = transforms
self.voxel_size = config["voxel_size"]
self.cylinder = config["cylindrical_coordinates"]
if phase != "test":
if cached_nuscenes is not None:
self.nusc = cached_nuscenes
else:
self.nusc = NuScenes(
version="v1.0-trainval", dataroot="s3://liuyouquan/nuScenes/", verbose=False
)
else:
self.nusc = NuScenes(
version="v1.0-test", dataroot="s3://liuyouquan/nuScenes/", verbose=False
)
self.list_tokens = []
# a skip ratio can be used to reduce the dataset size
# and accelerate experiments
if phase in ("val", "verifying"):
skip_ratio = 1
else:
try:
skip_ratio = config["dataset_skip_step"]
except KeyError:
skip_ratio = 1
self.dataroot = "s3://liuyouquan/nuScenes" #todo
# self.client = Client('~/.petreloss.conf')
# if phase in ("train", "val", "test"):
# phase_scenes = create_splits_scenes()[phase]
# elif phase == "parametrizing":
# phase_scenes = list(
# set(create_splits_scenes()["train"]) - set(CUSTOM_SPLIT)
# )
# elif phase == "verifying":
# phase_scenes = CUSTOM_SPLIT
if phase == "train":
with open('./list_keyframes_train.json', 'r') as f:
self.list_keyframes = json.load(f)
f1 = open('./save_dict_train.json', 'r')
content = f1.read()
self.frames_corrs_info = json.loads(content)
f1.close()
if phase == "val":
with open('./list_keyframes_val.json', 'r') as f:
self.list_keyframes = json.load(f)
f1 = open('./save_dict_val.json', 'r')
content = f1.read()
self.frames_corrs_info = json.loads(content)
f1.close()
if phase == "test":
with open('./list_keyframes_test.json', 'r') as f:
self.list_keyframes = json.load(f)
f1 = open('./save_dict_test.json', 'r')
content = f1.read()
self.frames_corrs_info = json.loads(content)
f1.close()
if phase == "parametrizing":
with open('./list_keyframes_parametrizing.json', 'r') as f:
self.list_keyframes = json.load(f)
f1 = open('./save_dict_parametrizing.json', 'r')
content = f1.read()
self.frames_corrs_info = json.loads(content)
f1.close()
elif phase == "verifying":
with open('./list_keyframes_verifying.json', 'r') as f:
self.list_keyframes = json.load(f)
f1 = open('./save_dict_verifying.json', 'r')
content = f1.read()
self.frames_corrs_info = json.loads(content)
f1.close()
print("before: ", len(self.list_keyframes))
self.list_keyframes = self.list_keyframes[::skip_ratio]
print("after: ", len(self.list_keyframes))
# skip_counter = 0
# create a list of all keyframe scenes
# for scene_idx in range(len(self.nusc.scene)):
# scene = self.nusc.scene[scene_idx]
# if scene["name"] in phase_scenes:
# skip_counter += 1
# if skip_counter % skip_ratio == 0:
# self.create_list_of_tokens(scene)
# labels' names lookup table
self.eval_labels = {
0: 0, 1: 0, 2: 7, 3: 7, 4: 7, 5: 0, 6: 7, 7: 0, 8: 0, 9: 1, 10: 0, 11: 0,
12: 8, 13: 0, 14: 2, 15: 3, 16: 3, 17: 4, 18: 5, 19: 0, 20: 0, 21: 6, 22: 9,
23: 10, 24: 11, 25: 12, 26: 13, 27: 14, 28: 15, 29: 0, 30: 16, 31: 0,
}
# def create_list_of_tokens(self, scene):
# # Get first in the scene
# current_sample_token = scene["first_sample_token"]
#
# # Loop to get all successive keyframes
# while current_sample_token != "":
# current_sample = self.nusc.get("sample", current_sample_token)
# next_sample_token = current_sample["next"]
# self.list_tokens.append(current_sample["data"]["LIDAR_TOP"])
# current_sample_token = next_sample_token
def __len__(self):
return len(self.list_keyframes)
def __getitem__(self, idx):
lidar_token = self.list_keyframes[idx]
key_ = lidar_token["LIDAR_TOP"]
pcl_path = self.dataroot + self.frames_corrs_info[key_]["lidar_name"].replace("samples", "")
# pc_original = LidarPointCloud.from_file(pcl_path)
# pc_ref = pc_original.points
# pointsensor = self.nusc.get("sample_data", lidar_token)
# pcl_path = os.path.join(self.nusc.dataroot, pointsensor["filename"])
points = LidarPointCloud.from_file(pcl_path).points.T
# get the points (4th coordinate is the point intensity)
pc = points[:, :3]
if self.labels:
# lidarseg_labels_filename = os.path.join(
# self.nusc.dataroot, self.nusc.get("lidarseg", lidar_token)["filename"]
# )
lidarseg_labels_filename = self.dataroot + "/" + self.frames_corrs_info[key_]["labels_name"]
points_labels = np.fromfile(lidarseg_labels_filename, dtype=np.uint8)
# points_labels = np.frombuffer(self.client.get(lidarseg_labels_filename, update_cache=True), dtype=np.uint8)
pc = torch.tensor(pc)
# apply the transforms (augmentation)
if self.transforms:
pc = self.transforms(pc)
if self.cylinder:
# Transform to cylinder coordinate and scale for given voxel size
x, y, z = pc.T
rho = torch.sqrt(x ** 2 + y ** 2) / self.voxel_size
# corresponds to a split each 1°
phi = torch.atan2(y, x) * 180 / np.pi
z = z / self.voxel_size
coords_aug = torch.cat((rho[:, None], phi[:, None], z[:, None]), 1)
else:
coords_aug = pc / self.voxel_size
# Voxelization for spvcnn
# discrete_coords, indexes, inverse_indexes = sparse_quantize(
# coords_aug.numpy(), return_index=True, return_inverse=True
# )
# discrete_coords, indexes, inverse_indexes = torch.from_numpy(discrete_coords), torch.from_numpy(indexes), torch.from_numpy(inverse_indexes)
        discrete_coords, indexes, inverse_indexes = ME.utils.sparse_quantize(
            coords_aug.contiguous(), return_index=True, return_inverse=True
        )
# use those voxels features
unique_feats = torch.tensor(points[indexes][:, 3:])
# print(((unique_feats) != 0).sum() / unique_feats.shape[0])
if self.labels:
points_labels = torch.tensor(
np.vectorize(self.eval_labels.__getitem__)(points_labels),
dtype=torch.int32,
)
unique_labels = points_labels[indexes]
lidar_name = self.frames_corrs_info[key_]["labels_name"]
if self.labels:
return (
pc,
discrete_coords,
unique_feats,
unique_labels,
points_labels,
inverse_indexes,
lidar_name,
)
else:
return pc, discrete_coords, unique_feats, inverse_indexes
def make_data_loader(config, phase, num_threads=0):
"""
Create the data loader for a given phase and a number of threads.
This function is not used with pytorch lightning, but is used when evaluating.
"""
# select the desired transformations
if phase == "train":
transforms = make_transforms_clouds(config)
else:
transforms = None
# instantiate the dataset
dset = NuScenesDataset(phase=phase, transforms=transforms, config=config)
collate_fn = custom_collate_fn
batch_size = config["batch_size"] // config["num_gpus"]
# create the loader
loader = torch.utils.data.DataLoader(
dset,
batch_size=batch_size,
shuffle=phase == "train",
num_workers=num_threads,
collate_fn=collate_fn,
pin_memory=False,
drop_last=phase == "train",
worker_init_fn=lambda id: np.random.seed(torch.initial_seed() // 2 ** 32 + id),
)
return loader
| 12,825 | 37.866667 | 149 |
py
|
CLIP2Scene
|
CLIP2Scene-main/downstream/model_builder.py
|
import torch
from model import MinkUNet, SPVCNN
def load_state_with_same_shape(model, weights):
"""
Load common weights in two similar models
(for instance between a pretraining and a downstream training)
"""
model_state = model.state_dict()
if list(weights.keys())[0].startswith("model."):
weights = {k.partition("model.")[2]: weights[k] for k in weights.keys()}
if list(weights.keys())[0].startswith("model_points."):
weights = {k.partition("model_points.")[2]: weights[k] for k in weights.keys()}
if list(weights.keys())[0].startswith("module."):
print("Loading multigpu weights with module. prefix...")
weights = {k.partition("module.")[2]: weights[k] for k in weights.keys()}
if list(weights.keys())[0].startswith("encoder."):
print("Loading multigpu weights with encoder. prefix...")
weights = {k.partition("encoder.")[2]: weights[k] for k in weights.keys()}
filtered_weights = {
k: v
for k, v in weights.items()
if (k in model_state and v.size() == model_state[k].size())
}
removed_weights = {
k: v
for k, v in weights.items()
if not (k in model_state and v.size() == model_state[k].size())
}
print("Loading weights:" + ", ".join(filtered_weights.keys()))
print("")
print("Not loading weights:" + ", ".join(removed_weights.keys()))
return filtered_weights
def make_model(config, load_path=None):
"""
Build the points model according to what is in the config
"""
assert not config[
"normalize_features"
], "You shouldn't normalize features for the downstream task"
# model = MinkUNet(1, config["model_n_out"], config)
# model = SPVCNN(1, config["model_n_out"], config)
model = MinkUNet(3, config["model_n_out"], config)
if load_path:
print("Training with pretrained model")
checkpoint = torch.load(load_path, map_location="cpu")
if "config" in checkpoint:
for cfg in ("voxel_size", "cylindrical_coordinates"):
assert checkpoint["config"][cfg] == config[cfg], (
f"{cfg} is not consistant. "
f"Checkpoint: {checkpoint['config'][cfg]}, "
f"Config: {config[cfg]}."
)
if set(checkpoint.keys()) == set(["epoch", "model", "optimizer", "train_criterion"]):
print("Pre-trained weights are coming from DepthContrast.")
pretraining_epochs = checkpoint["epoch"]
print(f"==> Number of pre-training epochs {pretraining_epochs}")
checkpoint = checkpoint["model"]
if list(checkpoint.keys())[0].startswith("module."):
print("Loading multigpu weights with module. prefix...")
checkpoint = {k.partition("module.")[2]: checkpoint[k] for k in checkpoint.keys()}
voxel_net_suffix = "trunk.2."
checkpoint = {
key.partition(voxel_net_suffix)[2]: checkpoint[key]
for key in checkpoint.keys() if key.startswith(voxel_net_suffix)
}
print(f"==> Number of loaded weight blobs {len(checkpoint)}")
checkpoint = {"model_points": checkpoint}
key = "model_points" if "model_points" in checkpoint else "state_dict"
filtered_weights = load_state_with_same_shape(model, checkpoint[key])
model_dict = model.state_dict()
model_dict.update(filtered_weights)
model.load_state_dict(model_dict)
if config["freeze_layers"]:
for param in list(model.parameters())[:-2]:
param.requires_grad = False
return model
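# Usage sketch (the checkpoint path is a placeholder):
#   model = make_model(config, load_path="<pretrained_checkpoint>.pt")
#   model = model.cuda()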
| 3,677 | 41.275862 | 98 |
py
|
CLIP2Scene
|
CLIP2Scene-main/downstream/__init__.py
| 0 | 0 | 0 |
py
|
|
CLIP2Scene
|
CLIP2Scene-main/downstream/criterion.py
|
"""
Lovasz-Softmax and Jaccard hinge loss in PyTorch
Maxim Berman 2018 ESAT-PSI KU Leuven (MIT License)
https://github.com/edwardzhou130/PolarSeg/blob/master/network/lovasz_losses.py
"""
from __future__ import print_function, division
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
# import evaluate
from .evaluate import CLASSES_NUSCENES
from .evaluate import CLASSES_KITTI
try:
from itertools import ifilterfalse
except ImportError: # py3k
from itertools import filterfalse as ifilterfalse
def lovasz_grad(gt_sorted):
"""
Computes gradient of the Lovasz extension w.r.t sorted errors
See Alg. 1 in paper
"""
p = len(gt_sorted)
gts = gt_sorted.sum()
intersection = gts - gt_sorted.float().cumsum(0)
union = gts + (1 - gt_sorted).float().cumsum(0)
jaccard = 1.0 - intersection / union
if p > 1: # cover 1-pixel case
jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
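# Worked example (illustrative): for gt_sorted = [1, 0, 1] the running intersections
# are [1, 1, 0] and unions [2, 3, 3], giving a Jaccard curve of [1/2, 2/3, 1]; the
# returned gradient is its discrete difference [1/2, 1/6, 1/3], which sums to 1.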
def iou_binary(preds, labels, EMPTY=1.0, ignore=None, per_image=True):
"""
IoU for foreground class
binary: 1 foreground, 0 background
"""
if not per_image:
preds, labels = (preds,), (labels,)
ious = []
for pred, label in zip(preds, labels):
intersection = ((label == 1) & (pred == 1)).sum()
union = ((label == 1) | ((pred == 1) & (label != ignore))).sum()
if not union:
iou = EMPTY
else:
iou = float(intersection) / float(union)
ious.append(iou)
    iou = mean(ious)  # mean across images if per_image
return 100 * iou
def iou(preds, labels, C, EMPTY=1.0, ignore=None, per_image=False):
"""
Array of IoU for each (non ignored) class
"""
if not per_image:
preds, labels = (preds,), (labels,)
ious = []
for pred, label in zip(preds, labels):
iou = []
for i in range(C):
# The ignored label is sometimes among predicted classes
if i != ignore:
intersection = ((label == i) & (pred == i)).sum()
union = ((label == i) | ((pred == i) & (label != ignore))).sum()
if not union:
iou.append(EMPTY)
else:
iou.append(float(intersection) / float(union))
ious.append(iou)
    # mean across images if per_image
ious = [mean(iou) for iou in zip(*ious)]
return 100 * np.array(ious)
# --------------------------- BINARY LOSSES ---------------------------
def lovasz_hinge(logits, labels, per_image=True, ignore=None):
"""
Binary Lovasz hinge loss
logits: [B, H, W] Variable, logits at each pixel
labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
per_image: compute the loss per image instead of per batch
ignore: void class id
"""
if per_image:
loss = mean(
lovasz_hinge_flat(
*flatten_binary_scores(log.unsqueeze(0), lab.unsqueeze(0), ignore)
)
for log, lab in zip(logits, labels)
)
else:
loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore))
return loss
def lovasz_hinge_flat(logits, labels):
"""
Binary Lovasz hinge loss
logits: [P] Variable, logits at each prediction
labels: [P] Tensor, binary ground truth labels (0 or 1)
ignore: label to ignore
"""
if len(labels) == 0:
# only void pixels, the gradients should be 0
return logits.sum() * 0.0
signs = 2.0 * labels.float() - 1.0
errors = 1.0 - logits * Variable(signs)
errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
perm = perm.data
gt_sorted = labels[perm]
grad = lovasz_grad(gt_sorted)
loss = torch.dot(F.relu(errors_sorted), Variable(grad))
return loss
def flatten_binary_scores(scores, labels, ignore=None):
"""
Flattens predictions in the batch (binary case)
Remove labels equal to 'ignore'
"""
scores = scores.view(-1)
labels = labels.view(-1)
if ignore is None:
return scores, labels
valid = labels != ignore
vscores = scores[valid]
vlabels = labels[valid]
return vscores, vlabels
class StableBCELoss(torch.nn.modules.Module):
def __init__(self):
super(StableBCELoss, self).__init__()
def forward(self, input, target):
neg_abs = -input.abs()
loss = input.clamp(min=0) - input * target + (1 + neg_abs.exp()).log()
return loss.mean()
def binary_xloss(logits, labels, ignore=None):
"""
Binary Cross entropy loss
logits: [B, H, W] Variable, logits at each pixel
(between -\infty and +\infty)
labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
ignore: void class id
"""
logits, labels = flatten_binary_scores(logits, labels, ignore)
loss = StableBCELoss()(logits, Variable(labels.float()))
return loss
# --------------------------- MULTICLASS LOSSES ---------------------------
class DownstreamLoss(nn.Module):
"""
    Custom loss which is the sum of a Lovasz-Softmax loss and a cross-entropy loss.
Main class to instantiate in the code.
"""
def __init__(self, weights=None, ignore_index=None, device="cpu"):
super(DownstreamLoss, self).__init__()
self.ignore_index = ignore_index
if weights is None:
self.crossentropy = torch.nn.CrossEntropyLoss()
else:
self.crossentropy = torch.nn.CrossEntropyLoss(
weight=torch.tensor(weights).float().to(device)
)
def forward(self, probas, labels):
if self.ignore_index is not None:
valid = labels != self.ignore_index
probas = probas[valid]
labels = labels[valid]
loss1 = self.crossentropy(probas, labels)
loss2 = lovasz_softmax_flat(probas.softmax(-1), labels)
return loss1 + loss2
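# Example (illustrative): criterion = DownstreamLoss(ignore_index=0, device="cuda")
# then criterion(logits, labels) drops ignored points and returns the sum of the
# cross-entropy and Lovasz-Softmax terms.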
class unknown_aware_infoNCE(nn.Module):
"""
    Unknown-aware loss for zero-shot training: cross-entropy on the seen classes
    plus a pseudo-supervised term that pushes unseen-class points away from the
    seen classes.
"""
def __init__(self, ignore_index=None, config=None):
super(unknown_aware_infoNCE, self).__init__()
self.ignore_index = ignore_index
# self.seen_classes =
self.unseen_classes = ['motorcycle', 'trailer', 'terrain', 'traffic_cone']
self.CLASS_LABELS = CLASSES_NUSCENES
if config['dataset'] == 'kitti':
self.CLASS_LABELS = CLASSES_KITTI
self.seen_class_index = list(range(len(self.CLASS_LABELS)))
for item in self.unseen_classes:
index = self.CLASS_LABELS.index(item)
# self.unseen_index.append(index)
self.seen_class_index.remove(index)
self.crossentropy = torch.nn.CrossEntropyLoss()
def pseudo_supervised(self, predictions):
if predictions.size()[0] == 0: return 0
predictions = torch.softmax(predictions, dim=1)
loss = torch.mean(torch.sum(predictions[:, self.seen_class_index], dim=1))
# loss += torch.mean(1 - torch.sum(predictions[:, self.unseen_index], dim=1))
return loss
def forward(self, probas, labels):
for item in self.unseen_classes:
index = self.CLASS_LABELS.index(item)
labels[labels == index] = -200
seen_index = ((labels != self.ignore_index) & (labels != -200))
unseen_index = labels == -200
loss1 = self.crossentropy(probas[seen_index], labels[seen_index])
loss2 = self.pseudo_supervised(probas[unseen_index])
return loss1 + loss2
def lovasz_softmax(probas, labels, classes="present", per_image=False, ignore=None):
"""
Multi-class Lovasz-Softmax loss
probas: [B, C, H, W] Variable, class probabilities at each prediction
(between 0 and 1).
Interpreted as binary (sigmoid) output with outputs of
size [B, H, W].
labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1)
classes: 'all' for all, 'present' for classes present in labels, or a
list of classes to average.
per_image: compute the loss per image instead of per batch
ignore: void class labels
"""
if per_image:
loss = mean(
lovasz_softmax_flat(
*flatten_probas(prob.unsqueeze(0), lab.unsqueeze(0), ignore),
classes=classes
)
for prob, lab in zip(probas, labels)
)
else:
loss = lovasz_softmax_flat(
*flatten_probas(probas, labels, ignore), classes=classes
)
return loss
def lovasz_softmax_flat(probas, labels, classes="present"):
"""
Multi-class Lovasz-Softmax loss
probas: [P, C] Variable, class probabilities at each prediction
labels: [P] Tensor, ground truth labels (between 0 and C - 1)
classes: 'all' for all, 'present' for classes present in labels,
or a list of classes to average.
"""
if probas.numel() == 0:
# only void pixels, the gradients should be 0
return probas * 0.0
C = probas.size(1)
losses = []
class_to_sum = list(range(C)) if classes in ["all", "present"] else classes
for c in class_to_sum:
fg = (labels == c).float() # foreground for class c
if classes == "present" and fg.sum() == 0:
continue
if C == 1:
if len(classes) > 1:
raise ValueError("Sigmoid output possible only with 1 class")
class_pred = probas[:, 0]
else:
class_pred = probas[:, c]
errors = (Variable(fg) - class_pred).abs()
errors_sorted, perm = torch.sort(errors, 0, descending=True)
perm = perm.data
fg_sorted = fg[perm]
losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted))))
return mean(losses)
def flatten_probas(probas, labels, ignore=None):
"""
Flattens predictions in the batch
"""
if probas.dim() == 3:
# assumes output of a sigmoid layer
B, H, W = probas.size()
probas = probas.view(B, 1, H, W)
elif probas.dim() == 5:
# 3D segmentation
B, C, L, H, W = probas.size()
probas = probas.contiguous().view(B, C, L, H * W)
B, C, H, W = probas.size()
# B * H * W, C = P, C
probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C)
labels = labels.view(-1)
if ignore is None:
return probas, labels
valid = labels != ignore
vprobas = probas[valid.nonzero().squeeze()]
vlabels = labels[valid]
return vprobas, vlabels
def xloss(logits, labels, ignore=None):
"""
Cross entropy loss
"""
return F.cross_entropy(logits, Variable(labels), ignore_index=255)
def jaccard_loss(probas, labels, ignore=None, smooth=100, bk_class=None):
"""
    Multi-class smoothed Jaccard loss
probas: [B, C, H, W] Variable, class probabilities at each prediction.
Interpreted as binary (sigmoid) output with outputs of
size [B, H, W].
labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1)
classes: 'all' for all, 'present' for classes present in labels, or
a list of classes to average.
per_image: compute the loss per image instead of per batch
ignore: void class labels
"""
vprobas, vlabels = flatten_probas(probas, labels, ignore)
true_1_hot = torch.eye(vprobas.shape[1])[vlabels]
if bk_class:
one_hot_assignment = torch.ones_like(vlabels)
one_hot_assignment[vlabels == bk_class] = 0
one_hot_assignment = one_hot_assignment.float().unsqueeze(1)
true_1_hot = true_1_hot * one_hot_assignment
true_1_hot = true_1_hot.to(vprobas.device)
intersection = torch.sum(vprobas * true_1_hot)
cardinality = torch.sum(vprobas + true_1_hot)
    loss = ((intersection + smooth) / (cardinality - intersection + smooth)).mean()
return (1 - loss) * smooth
def hinge_jaccard_loss(
probas, labels, ignore=None, classes="present", hinge=0.1, smooth=100
):
"""
Multi-class Hinge Jaccard loss
probas: [B, C, H, W] Variable, class probabilities at each prediction.
Interpreted as binary (sigmoid) output with outputs of
size [B, H, W].
labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1)
classes: 'all' for all, 'present' for classes present in labels,
or a list of classes to average.
ignore: void class labels
"""
vprobas, vlabels = flatten_probas(probas, labels, ignore)
C = vprobas.size(1)
losses = []
class_to_sum = list(range(C)) if classes in ["all", "present"] else classes
for c in class_to_sum:
if c in vlabels:
c_sample_ind = vlabels == c
cprobas = vprobas[c_sample_ind, :]
non_c_ind = np.array([a for a in class_to_sum if a != c])
class_pred = cprobas[:, c]
max_non_class_pred = torch.max(cprobas[:, non_c_ind], dim=1)[0]
TP = (
torch.sum(torch.clamp(class_pred - max_non_class_pred, max=hinge) + 1.0)
+ smooth
)
FN = torch.sum(
torch.clamp(max_non_class_pred - class_pred, min=-hinge) + hinge
)
if (~c_sample_ind).sum() == 0:
FP = 0
else:
nonc_probas = vprobas[~c_sample_ind, :]
class_pred = nonc_probas[:, c]
max_non_class_pred = torch.max(nonc_probas[:, non_c_ind], dim=1)[0]
FP = torch.sum(
torch.clamp(class_pred - max_non_class_pred, max=hinge) + 1.0
)
losses.append(1 - TP / (TP + FP + FN))
if len(losses) == 0:
return 0
return mean(losses)
# --------------------------- HELPER FUNCTIONS ---------------------------
def isnan(x):
return x != x
def mean(ls, ignore_nan=False, empty=0):
"""
nanmean compatible with generators.
"""
ls = iter(ls)
if ignore_nan:
ls = ifilterfalse(isnan, ls)
try:
n = 1
acc = next(ls)
except StopIteration:
if empty == "raise":
raise ValueError("Empty mean")
return empty
for n, v in enumerate(ls, 2):
acc += v
if n == 1:
return acc
return acc / n
| 14,503 | 32.496536 | 88 |
py
|
CLIP2Scene
|
CLIP2Scene-main/downstream/dataloader_kitti.py
|
import os
import re
import torch
import numpy as np
from torch.utils.data import Dataset
# from MinkowskiEngine.utils import sparse_quantize
import MinkowskiEngine as ME
from utils.transforms import make_transforms_clouds
# from torchsparse import SparseTensor
# from torchsparse.utils.collate import sparse_collate_fn
# from torchsparse.utils.quantize import sparse_quantize
TRAIN_SET = {0, 1, 2, 3, 4, 5, 6, 7, 9, 10}
VALIDATION_SET = {8}
TEST_SET = {11, 12, 13, 14, 15, 16, 17, 18, 19, 20}
def custom_collate_fn(list_data):
"""
Collate function adapted for creating batches with MinkowskiEngine.
"""
input = list(zip(*list_data))
labelized = len(input) == 6
if labelized:
xyz, coords, feats, labels, evaluation_labels, inverse_indexes = input
else:
xyz, coords, feats, inverse_indexes = input
coords_batch, len_batch = [], []
for batch_id, coo in enumerate(coords):
N = coords[batch_id].shape[0]
coords_batch.append(
torch.cat((coo, torch.ones(N, 1, dtype=torch.int32) * batch_id), 1)
)
len_batch.append(N)
# for batch_id, coo in enumerate(coords):
# N = coords[batch_id].shape[0]
# coords_batch.append(
# torch.cat((torch.ones(N, 1, dtype=torch.int32) * batch_id, coo), 1)
# )
# len_batch.append(N)
# coords_batch_sparse = []
# Concatenate all lists
coords_batch = torch.cat(coords_batch, 0).int()
feats_batch = torch.cat(feats, 0).float()
if labelized:
labels_batch = torch.cat(labels, 0).long()
return {
"pc": xyz, # point cloud
"sinput_C": coords_batch, # discrete coordinates (ME)
"sinput_F": feats_batch, # point features (N, 3)
"len_batch": len_batch, # length of each batch
"labels": labels_batch, # labels for each (voxelized) point
"evaluation_labels": evaluation_labels, # labels for each point
"inverse_indexes": inverse_indexes, # labels for each point
}
else:
return {
"pc": xyz,
"sinput_C": coords_batch,
"sinput_F": feats_batch,
"len_batch": len_batch,
"inverse_indexes": inverse_indexes,
}
class SemanticKITTIDataset(Dataset):
"""
Dataset returning a lidar scene and associated labels.
    Note that the superpixels functionality has been removed.
"""
def __init__(self, phase, config, transforms=None):
self.phase = phase
self.labels = self.phase != "test"
self.transforms = transforms
self.voxel_size = config["voxel_size"]
self.cylinder = config["cylindrical_coordinates"]
# a skip ratio can be used to reduce the dataset size
# and accelerate experiments
if phase == "train":
try:
skip_ratio = config["dataset_skip_step"]
except KeyError:
skip_ratio = 1
else:
skip_ratio = 1
if phase in ("train", "parametrizing"):
phase_set = TRAIN_SET
elif phase in ("val", "verifying"):
phase_set = VALIDATION_SET
elif phase == "test":
phase_set = TEST_SET
self.list_files = []
for num in phase_set:
directory = next(
os.walk(
f"/mnt/lustre/share_data/liuyouquan/semantickitti/sequences/{num:0>2d}/velodyne"
)
)
self.list_files.extend(
map(
lambda x: f"/mnt/lustre/share_data/liuyouquan/semantickitti/sequences/"
f"{num:0>2d}/velodyne/" + x,
directory[2],
)
)
self.list_files = sorted(self.list_files)[::skip_ratio]
# labels' names lookup table
self.eval_labels = {
0: 0, 1: 0, 10: 1, 11: 2, 13: 5, 15: 3, 16: 5, 18: 4, 20: 5, 30: 6, 31: 7,
32: 8, 40: 9, 44: 10, 48: 11, 49: 12, 50: 13, 51: 14, 52: 0, 60: 9, 70: 15,
71: 16, 72: 17, 80: 18, 81: 19, 99: 0, 252: 1, 253: 7, 254: 6, 255: 8,
256: 5, 257: 5, 258: 4, 259: 5,
}
def __len__(self):
return len(self.list_files)
def __getitem__(self, idx):
lidar_file = self.list_files[idx]
points = np.fromfile(lidar_file, dtype=np.float32).reshape((-1, 4))
# get the points (4th coordinate is the point intensity)
pc = points[:, :3]
if self.labels:
lidarseg_labels_filename = re.sub(
"bin", "label", re.sub("velodyne", "labels", lidar_file)
)
points_labels = (
np.fromfile(lidarseg_labels_filename, dtype=np.uint32) & 0xFFFF
)
pc = torch.tensor(pc)
# apply the transforms (augmentation)
if self.transforms:
pc = self.transforms(pc)
if self.cylinder:
# Transform to cylinder coordinate and scale for voxel size
x, y, z = pc.T
rho = torch.sqrt(x ** 2 + y ** 2) / self.voxel_size
# corresponds to a split each 1°
phi = torch.atan2(y, x) * 180 / np.pi
z = z / self.voxel_size
coords_aug = torch.cat((rho[:, None], phi[:, None], z[:, None]), 1)
else:
coords_aug = pc / self.voxel_size
# Voxelization
# discrete_coords, indexes, inverse_indexes = sparse_quantize(
# coords_aug, return_index=True, return_inverse=True
# )
# discrete_coords, indexes, inverse_indexes = sparse_quantize(coords_aug.numpy(),
# return_index=True,
# return_inverse=True)
        discrete_coords, indexes, inverse_indexes = ME.utils.sparse_quantize(
            coords_aug.contiguous(), return_index=True, return_inverse=True
        )
# unique_feats = torch.tensor(points[indexes][:, 3:])
unique_feats = torch.tensor(points[indexes][:, 3:] + 1.)
# print(((unique_feats - 1) != 0).sum() / unique_feats.shape[0] )
if self.labels:
points_labels = torch.tensor(
np.vectorize(self.eval_labels.__getitem__)(points_labels),
dtype=torch.int32,
)
unique_labels = points_labels[indexes]
if self.labels:
return (
pc,
discrete_coords,
unique_feats,
unique_labels,
points_labels,
inverse_indexes,
)
else:
return pc, discrete_coords, unique_feats, inverse_indexes
def make_data_loader(config, phase, num_threads=0):
"""
Create the data loader for a given phase and a number of threads.
"""
# select the desired transformations
if phase == "train":
transforms = make_transforms_clouds(config)
else:
transforms = None
# instantiate the dataset
dset = SemanticKITTIDataset(phase=phase, transforms=transforms, config=config)
collate_fn = custom_collate_fn
batch_size = config["batch_size"] // config["num_gpus"]
# create the loader
loader = torch.utils.data.DataLoader(
dset,
batch_size=batch_size,
# shuffle=False if sampler else True,
shuffle=phase == "train",
num_workers=num_threads,
collate_fn=collate_fn,
pin_memory=False,
# sampler=sampler,
drop_last=phase == "train",
worker_init_fn=lambda id: np.random.seed(torch.initial_seed() // 2 ** 32 + id),
)
return loader
| 7,816 | 33.436123 | 147 |
py
|
CLIP2Scene
|
CLIP2Scene-main/utils/nuuuu.py
|
from nuscenes import NuScenes
import pickle
import os
import numpy as np
import json
from nuscenes.nuscenes import NuScenes
from nuscenes.utils.geometry_utils import view_points
from nuscenes.utils.splits import create_splits_scenes
from nuscenes.utils.data_classes import LidarPointCloud
CUSTOM_SPLIT = [
"scene-0008", "scene-0009", "scene-0019", "scene-0029", "scene-0032", "scene-0042",
"scene-0045", "scene-0049", "scene-0052", "scene-0054", "scene-0056", "scene-0066",
"scene-0067", "scene-0073", "scene-0131", "scene-0152", "scene-0166", "scene-0168",
"scene-0183", "scene-0190", "scene-0194", "scene-0208", "scene-0210", "scene-0211",
"scene-0241", "scene-0243", "scene-0248", "scene-0259", "scene-0260", "scene-0261",
"scene-0287", "scene-0292", "scene-0297", "scene-0305", "scene-0306", "scene-0350",
"scene-0352", "scene-0358", "scene-0361", "scene-0365", "scene-0368", "scene-0377",
"scene-0388", "scene-0391", "scene-0395", "scene-0413", "scene-0427", "scene-0428",
"scene-0438", "scene-0444", "scene-0452", "scene-0453", "scene-0459", "scene-0463",
"scene-0464", "scene-0475", "scene-0513", "scene-0533", "scene-0544", "scene-0575",
"scene-0587", "scene-0589", "scene-0642", "scene-0652", "scene-0658", "scene-0669",
"scene-0678", "scene-0687", "scene-0701", "scene-0703", "scene-0706", "scene-0710",
"scene-0715", "scene-0726", "scene-0735", "scene-0740", "scene-0758", "scene-0786",
"scene-0790", "scene-0804", "scene-0806", "scene-0847", "scene-0856", "scene-0868",
"scene-0882", "scene-0897", "scene-0899", "scene-0976", "scene-0996", "scene-1012",
"scene-1015", "scene-1016", "scene-1018", "scene-1020", "scene-1024", "scene-1044",
"scene-1058", "scene-1094", "scene-1098", "scene-1107",
]
data_path = "/home/PJLAB/liuyouquan/nuScenes/"
nusc = NuScenes(version='v1.0-trainval', dataroot=data_path, verbose=True)
# imageset = "/home/PJLAB/liuyouquan/nuScenes/nuscenes_infos_val.pkl"
#############train
# phase_scenes = create_splits_scenes()['val']
phase_scenes = CUSTOM_SPLIT
# phase_scenes = list(set(create_splits_scenes()["train"]) - set(CUSTOM_SPLIT))
skip_counter = 0
list_keyframes = []
for scene_idx in range(len(nusc.scene)):
scene = nusc.scene[scene_idx]
if scene["name"] in phase_scenes:
skip_counter += 1
if skip_counter % 1 == 0:
current_sample_token = scene["first_sample_token"]
# Loop to get all successive keyframes
list_data = []
while current_sample_token != "":
current_sample = nusc.get("sample", current_sample_token)
list_data.append(current_sample["data"])
current_sample_token = current_sample["next"]
list_keyframes.extend(list_data)
b = json.dumps(list_keyframes)
f2 = open('./list_keyframes_verifying.json','w')
f2.write(b)
f2.close()
camera_list = [
"CAM_FRONT",
"CAM_FRONT_RIGHT",
"CAM_BACK_RIGHT",
"CAM_BACK",
"CAM_BACK_LEFT",
"CAM_FRONT_LEFT",
]
save_dict = {}
for idx in range(len(list_keyframes)):
lk = list_keyframes[idx]
pointsensor = nusc.get("sample_data", lk["LIDAR_TOP"])
sub_pcl_path = pointsensor["filename"]
labels_filename = nusc.get("lidarseg", lk["LIDAR_TOP"])["filename"].replace("lidarseg/", "lidarseg2/")
print(sub_pcl_path)
print(labels_filename)
cam_dict = {}
for i, camera_name in enumerate(camera_list):
ap_list = {}
cam = nusc.get("sample_data", lk[camera_name])
cam_sub_path = cam["filename"] # todo
ap_list["camera_name"] = cam_sub_path
ap_list['token'] = cam['token']
cs_record = nusc.get(
"calibrated_sensor", pointsensor["calibrated_sensor_token"])
ap_list["cs_record"] = cs_record
poserecord = nusc.get("ego_pose", pointsensor["ego_pose_token"])
ap_list["poserecord"] = poserecord
poserecord_ = nusc.get("ego_pose", cam["ego_pose_token"])
ap_list["poserecord_"] = poserecord_
cs_record_ = nusc.get(
"calibrated_sensor", cam["calibrated_sensor_token"]
)
ap_list["cs_record_"] = cs_record_
cam_dict[camera_name] = ap_list
# save_dict[lk["LIDAR_TOP"]] = [sub_pcl_path, cam_dict]
save_dict[lk["LIDAR_TOP"]] = {"lidar_name": sub_pcl_path,
"labels_name": labels_filename,
"cam_info": cam_dict}
b1 = json.dumps(save_dict)
f = open('./save_dict_verifying.json','w')
f.write(b1)
f.close()
# print(scene)
# '''
# with open(imageset, 'rb') as f:
# data = pickle.load(f)
# nusc_infos = data['infos']
# nusc_train = {}
# for index in range(len(nusc_infos)):
# info = nusc_infos[index]
# lidar_path = info['lidar_path'][16:]
# print(lidar_path)
# print('='*80)
# lidar_sd_token = nusc.get('sample', info['token'])['data']['LIDAR_TOP']
# lidarseg_labels_filename = os.path.join("s3://liuyouquan/nuScenes",
# nusc.get('lidarseg', lidar_sd_token)['filename'])
# print(lidarseg_labels_filename)
# nusc_train[lidar_path] = lidarseg_labels_filename
# points = np.fromfile(os.path.join(data_path, lidar_path), dtype=np.float32, count=-1).reshape([-1, 5])
#
# # b = json.dumps(nusc_train)
# # f2 = open('./nusc_val.json','w')
# # f2.write(b)
# # f2.close()
# read
# f = open('./nusc_val.json','r')
# content = f.read()
# a = json.loads(content)
# print(a)
# print(len(a))
# f.close()
| 5,540 | 32.786585 | 108 |
py
|
CLIP2Scene
|
CLIP2Scene-main/utils/read_config.py
|
import yaml
from datetime import datetime as dt
def generate_config(file):
with open(file, "r") as f:
config = yaml.load(f, Loader=yaml.FullLoader)
config["datetime"] = dt.today().strftime("%d%m%y-%H%M")
return config
| 246 | 19.583333 | 63 |
py
|
CLIP2Scene
|
CLIP2Scene-main/utils/savemodel.py
|
import torch
import os
def save_checkpoint(self):
trained_epoch = self.cur_epoch + 1
ckpt_name = self.ckpt_dir / ('checkpoint_epoch_%d' % trained_epoch)
checkpoint_state = {}
checkpoint_state['epoch'] = trained_epoch
checkpoint_state['it'] = self.it
if isinstance(self.model, torch.nn.parallel.DistributedDataParallel):
model_state = model_state_to_cpu(self.model.module.state_dict())
else:
model_state = model_state_to_cpu(self.model.state_dict())
checkpoint_state['model_state'] = model_state
checkpoint_state['optimizer_state'] = self.optimizer.state_dict()
checkpoint_state['scaler'] = self.scaler.state_dict()
checkpoint_state['lr_scheduler_state'] = self.lr_scheduler.state_dict()
torch.save(checkpoint_state, f"{ckpt_name}.pth")
def resume(self, filename):
if not os.path.isfile(filename):
raise FileNotFoundError
self.logger.info(f"==> Loading parameters from checkpoint {filename}")
checkpoint = torch.load(filename, map_location='cpu')
# self.cur_epoch = checkpoint['epoch']
# self.start_epoch = checkpoint['epoch']
# self.it = checkpoint['it']
self.model.load_params(checkpoint['model_state'], strict=True)
# self.optimizer.load_state_dict(checkpoint['optimizer_state'])
# self.scaler.load_state_dict(checkpoint['scaler'])
# self.lr_scheduler.load_state_dict(checkpoint['lr_scheduler_state'])
self.logger.info('==> Done')
return
| 1,481 | 41.342857 | 76 |
py
|
CLIP2Scene
|
CLIP2Scene-main/utils/chamfer_distance.py
|
import torch
import torch.nn as nn
def compute_chamfer_distance(p1, p2):
    '''
    Calculate Chamfer Distance between two point sets
    :param p1: size[bn, N, D]
    :param p2: size[bn, M, D]
    :return: per-point squared distances to the nearest neighbour in the other set,
             one tensor per direction (their sums give the Chamfer Distance)
    '''
diff = p1[:, :, None, :] - p2[:, None, :, :]
dist = torch.sum(diff*diff, dim=3)
dist1 = dist
dist2 = torch.transpose(dist, 1, 2)
dist_min1, _ = torch.min(dist1, dim=2)
dist_min2, _ = torch.min(dist2, dim=2)
return dist_min1, dist_min2
class ComputeCDLoss(nn.Module):
def __init__(self):
super(ComputeCDLoss, self).__init__()
def forward(self, recon_points, gt_points):
dist1, dist2 = compute_chamfer_distance(recon_points, gt_points)
loss = (torch.sum(dist1) + torch.sum(dist2)) / (recon_points.shape[0] + 1E-6)
# print(loss)
return loss
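# Quick usage sketch with random point sets (batch of 2, 128 vs 96 points in 3D):
if __name__ == "__main__":
    p1 = torch.rand(2, 128, 3)
    p2 = torch.rand(2, 96, 3)
    criterion = ComputeCDLoss()
    # scalar loss: summed nearest-neighbour squared distances, normalised by batch size
    print(criterion(p1, p2))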
| 934 | 25.714286 | 85 |
py
|
CLIP2Scene
|
CLIP2Scene-main/utils/parse_dataset.py
|
import re
from nuscenes.nuscenes import NuScenes
from nuscenes.utils.splits import create_splits_scenes
if __name__ == "__main__":
"""
File used to generate the parametrizing splits
"""
nusc = NuScenes(
version="v1.0-trainval", dataroot="datasets/nuscenes/", verbose=True
)
phase_scenes = create_splits_scenes()["train"]
n_rain = 0
n_night = 0
n_singapore = 0
total = 0
for scene_idx in range(len(nusc.scene)):
scene = nusc.scene[scene_idx]
if scene["name"] in phase_scenes:
description = re.split("[, ]", scene["description"].lower())
rain = "rain" in description
night = "night" in description
singapore = nusc.get("log", scene["log_token"])["location"].startswith(
"singapore"
)
n_rain += rain
n_night += night
n_singapore += singapore
total += 1
print(
f"Statistics in the train set:\n"
f"{total} scenes\n"
f"{n_rain} raining scenes\n"
f"{n_night} night-time scenes\n"
f"{n_singapore} scenes in Singapore\n"
f"{total - n_singapore} scenes in Boston"
)
phase_scenes = create_splits_scenes()["val"]
n_rain = 0
n_night = 0
n_singapore = 0
total = 0
for scene_idx in range(len(nusc.scene)):
scene = nusc.scene[scene_idx]
if scene["name"] in phase_scenes:
description = re.split("[, ]", scene["description"].lower())
rain = "rain" in description
night = "night" in description
singapore = nusc.get("log", scene["log_token"])["location"].startswith(
"singapore"
)
n_rain += rain
n_night += night
n_singapore += singapore
total += 1
print(
f"Statistics in the val set:\n"
f"{total} scenes\n"
f"{n_rain} raining scenes\n"
f"{n_night} night-time scenes\n"
f"{n_singapore} scenes in Singapore\n"
f"{total - n_singapore} scenes in Boston"
)
while True:
verifying = [
"scene-0008", "scene-0009", "scene-0019", "scene-0029", "scene-0032",
"scene-0042", "scene-0045", "scene-0049", "scene-0052", "scene-0054",
"scene-0056", "scene-0066", "scene-0067", "scene-0073", "scene-0131",
"scene-0152", "scene-0166", "scene-0168", "scene-0183", "scene-0190",
"scene-0194", "scene-0208", "scene-0210", "scene-0211", "scene-0241",
"scene-0243", "scene-0248", "scene-0259", "scene-0260", "scene-0261",
"scene-0287", "scene-0292", "scene-0297", "scene-0305", "scene-0306",
"scene-0350", "scene-0352", "scene-0358", "scene-0361", "scene-0365",
"scene-0368", "scene-0377", "scene-0388", "scene-0391", "scene-0395",
"scene-0413", "scene-0427", "scene-0428", "scene-0438", "scene-0444",
"scene-0452", "scene-0453", "scene-0459", "scene-0463", "scene-0464",
"scene-0475", "scene-0513", "scene-0533", "scene-0544", "scene-0575",
"scene-0587", "scene-0589", "scene-0642", "scene-0652", "scene-0658",
"scene-0669", "scene-0678", "scene-0687", "scene-0701", "scene-0703",
"scene-0706", "scene-0710", "scene-0715", "scene-0726", "scene-0735",
"scene-0740", "scene-0758", "scene-0786", "scene-0790", "scene-0804",
"scene-0806", "scene-0847", "scene-0856", "scene-0868", "scene-0882",
"scene-0897", "scene-0899", "scene-0976", "scene-0996", "scene-1012",
"scene-1015", "scene-1016", "scene-1018", "scene-1020", "scene-1024",
"scene-1044", "scene-1058", "scene-1094", "scene-1098", "scene-1107",
] # Chosen mini-val subset. Replace by a random generator to create another subset
n_rain = 0
n_night = 0
n_singapore = 0
total = 0
for scene_idx in range(len(nusc.scene)):
scene = nusc.scene[scene_idx]
if scene["name"] in verifying:
description = re.split("[, ]", scene["description"].lower())
rain = "rain" in description
night = "night" in description
singapore = nusc.get("log", scene["log_token"])["location"].startswith(
"singapore"
)
n_rain += rain
n_night += night
n_singapore += singapore
if n_singapore == 44 and n_rain == 20 and n_night == 12:
break
print(verifying)
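    # A sketch of the random search the comment above alludes to: repeatedly draw 100
    # train scenes at random until the draw reproduces the target statistics
    # (44 Singapore, 20 rain, 12 night scenes), instead of using the fixed list.
    # import random
    # train_scenes = [s["name"] for s in nusc.scene if s["name"] in create_splits_scenes()["train"]]
    # while True:
    #     candidate = random.sample(train_scenes, 100)
    #     ...  # recompute n_singapore / n_rain / n_night over `candidate` as done above
    #     if n_singapore == 44 and n_rain == 20 and n_night == 12:
    #         break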
| 4,610 | 40.169643 | 91 |
py
|
CLIP2Scene
|
CLIP2Scene-main/utils/prompt_engineering.py
|
import numpy as np
import torch
import clip
import argparse
scannet_classes = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'other furniture']
nuscenes_classes = ["barrier", "bicycle", "bus", "car", "construction vehicle", "motorcycle", "pedestrian", "traffic_cone", "trailer", "truck", "driveable surface", "other_flat", "sidewalk", "terrain", "manmade", "vegetation"]
kitti_classes = [ "car", "bicycle", "motorcycle", "truck", "other vehicle", "person", "bicyclist", "motorcyclist", "road", "parking", "sidewalk", "other ground", "building", "fence", "vegetation", "trunk", "terrain", "pole", "traffic sign"]
cityscapes_classes = ["road", "sidewalk", "building", "wall", "fence", "pole", "traffic light", "traffic sign", "vegetation", "terrain", "sky", "person", "rider", "car", "truck", "bus", "train", "motorcycle", "bicycle"]
ade20k_classes = ['wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ', 'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth', 'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car', 'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug', 'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe', 'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column', 'signboard', 'chest of drawers', 'counter', 'sand', 'sink', 'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path', 'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door', 'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table', 'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove', 'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar', 'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower', 'chandelier', 'awning', 'streetlight', 'booth', 'television receiver', 'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister', 'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van', 'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything', 'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent', 'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank', 'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake', 'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce', 'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen', 'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass', 'clock', 'flag']
coco_stuff_classes = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush', 'banner', 'blanket', 'branch', 'bridge', 'building', 'bush', 'cabinet', 'cage', 'cardboard', 'carpet', 'ceiling', 'tile ceiling', 'cloth', 'clothes', 'clouds', 'counter', 'cupboard', 'curtain', 'desk', 'dirt', 'door', 'fence', 'marble floor', 'floor', 'stone floor', 'tile floor', 'wood floor', 'flower', 'fog', 'food', 'fruit', 'furniture', 'grass', 'gravel', 'ground', 'hill', 'house', 'leaves', 'light', 'mat', 'metal', 'mirror', 'moss', 'mountain', 'mud', 'napkin', 'net', 'paper', 'pavement', 'pillow', 'plant', 'plastic', 'platform', 'playingfield', 'railing', 'railroad', 'river', 'road', 'rock', 'roof', 'rug', 'salad', 'sand', 'sea', 'shelf', 'sky', 'skyscraper', 'snow', 'solid', 'stairs', 'stone', 'straw', 'structural', 'table', 'tent', 'textile', 'towel', 'tree', 'vegetable', 'brick wall', 'concrete wall', 'wall', 'panel wall', 'stone wall', 'tile wall', 'wood wall', 'water', 'waterdrops', 'blind window', 'window', 'wood']
voc_classes = ['airplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'dining table', 'dog', 'horse', 'motorbike', 'person', 'potted plant', 'sheep', 'sofa', 'train', 'tv monitor']
pascal_context_classes = ['airplane', 'bag', 'bed', 'bedclothes', 'bench', 'bicycle', 'bird', 'boat', 'book', 'bottle', 'building', 'bus', 'cabinet', 'car', 'cat', 'ceiling', 'chair', 'cloth', 'computer', 'cow', 'cup', 'curtain', 'dog', 'door', 'fence', 'floor', 'flower', 'food', 'grass', 'ground', 'horse', 'keyboard', 'light', 'motorbike', 'mountain', 'mouse', 'person', 'plate', 'platform', 'potted plant', 'road', 'rock', 'sheep', 'shelves', 'sidewalk', 'sign', 'sky', 'snow', 'sofa', 'table', 'track', 'train', 'tree', 'truck', 'tv monitor', 'wall', 'water', 'window', 'wood']
all_pascal_context_classes = ['accordion', 'airplane', 'air conditioner', 'antenna', 'artillery', 'ashtray', 'atrium', 'baby carriage', 'bag', 'ball', 'balloon', 'bamboo weaving', 'barrel', 'baseball bat', 'basket', 'basketball backboard', 'bathtub', 'bed', 'bedclothes', 'beer', 'bell', 'bench', 'bicycle', 'binoculars', 'bird', 'bird cage', 'bird feeder', 'bird nest', 'blackboard', 'board', 'boat', 'bone', 'book', 'bottle', 'bottle opener', 'bowl', 'box', 'bracelet', 'brick', 'bridge', 'broom', 'brush', 'bucket', 'building', 'bus', 'cabinet', 'cabinet door', 'cage', 'cake', 'calculator', 'calendar', 'camel', 'camera', 'camera lens', 'can', 'candle', 'candle holder', 'cap', 'car', 'card', 'cart', 'case', 'casette recorder', 'cash register', 'cat', 'cd', 'cd player', 'ceiling', 'cell phone', 'cello', 'chain', 'chair', 'chessboard', 'chicken', 'chopstick', 'clip', 'clippers', 'clock', 'closet', 'cloth', 'clothes tree', 'coffee', 'coffee machine', 'comb', 'computer', 'concrete', 'cone', 'container', 'control booth', 'controller', 'cooker', 'copying machine', 'coral', 'cork', 'corkscrew', 'counter', 'court', 'cow', 'crabstick', 'crane', 'crate', 'cross', 'crutch', 'cup', 'curtain', 'cushion', 'cutting board', 'dais', 'disc', 'disc case', 'dishwasher', 'dock', 'dog', 'dolphin', 'door', 'drainer', 'dray', 'drink dispenser', 'drinking machine', 'drop', 'drug', 'drum', 'drum kit', 'duck', 'dumbbell', 'earphone', 'earrings', 'egg', 'electric fan', 'electric iron', 'electric pot', 'electric saw', 'electronic keyboard', 'engine', 'envelope', 'equipment', 'escalator', 'exhibition booth', 'extinguisher', 'eyeglass', 'fan', 'faucet', 'fax machine', 'fence', 'ferris wheel', 'fire extinguisher', 'fire hydrant', 'fire place', 'fish', 'fish tank', 'fishbowl', 'fishing net', 'fishing pole', 'flag', 'flagstaff', 'flame', 'flashlight', 'floor', 'flower', 'fly', 'foam', 'food', 'footbridge', 'forceps', 'fork', 'forklift', 'fountain', 'fox', 'frame', 'fridge', 'frog', 'fruit', 'funnel', 'furnace', 'game controller', 'game machine', 'gas cylinder', 'gas hood', 'gas stove', 'gift box', 'glass', 'glass marble', 'globe', 'glove', 'goal', 'grandstand', 'grass', 'gravestone', 'ground', 'guardrail', 'guitar', 'gun', 'hammer', 'hand cart', 'handle', 'handrail', 'hanger', 'hard disk drive', 'hat', 'hay', 'headphone', 'heater', 'helicopter', 'helmet', 'holder', 'hook', 'horse', 'horse-drawn carriage', 'hot-air balloon', 'hydrovalve', 'ice', 'inflator pump', 'ipod', 'iron', 'ironing board', 'jar', 'kart', 'kettle', 'key', 'keyboard', 'kitchen range', 'kite', 'knife', 'knife block', 'ladder', 'ladder truck', 'ladle', 'laptop', 'leaves', 'lid', 'life buoy', 'light', 'light bulb', 'lighter', 'line', 'lion', 'lobster', 'lock', 'machine', 'mailbox', 'mannequin', 'map', 'mask', 'mat', 'match book', 'mattress', 'menu', 'metal', 'meter box', 'microphone', 'microwave', 'mirror', 'missile', 'model', 'money', 'monkey', 'mop', 'motorbike', 'mountain', 'mouse', 'mouse pad', 'musical instrument', 'napkin', 'net', 'newspaper', 'oar', 'ornament', 'outlet', 'oven', 'oxygen bottle', 'pack', 'pan', 'paper', 'paper box', 'paper cutter', 'parachute', 'parasol', 'parterre', 'patio', 'pelage', 'pen', 'pen container', 'pencil', 'person', 'photo', 'piano', 'picture', 'pig', 'pillar', 'pillow', 'pipe', 'pitcher', 'plant', 'plastic', 'plate', 'platform', 'player', 'playground', 'pliers', 'plume', 'poker', 'poker chip', 'pole', 'pool table', 'postcard', 'poster', 'pot', 'potted plant', 'printer', 'projector', 'pumpkin', 'rabbit', 'racket', 'radiator', 
'radio', 'rail', 'rake', 'ramp', 'range hood', 'receiver', 'recorder', 'recreational machines', 'remote control', 'road', 'robot', 'rock', 'rocket', 'rocking horse', 'rope', 'rug', 'ruler', 'runway', 'saddle', 'sand', 'saw', 'scale', 'scanner', 'scissors', 'scoop', 'screen', 'screwdriver', 'sculpture', 'scythe', 'sewer', 'sewing machine', 'shed', 'sheep', 'shell', 'shelves', 'shoe', 'shopping cart', 'shovel', 'sidecar', 'sidewalk', 'sign', 'signal light', 'sink', 'skateboard', 'ski', 'sky', 'sled', 'slippers', 'smoke', 'snail', 'snake', 'snow', 'snowmobiles', 'sofa', 'spanner', 'spatula', 'speaker', 'speed bump', 'spice container', 'spoon', 'sprayer', 'squirrel', 'stage', 'stair', 'stapler', 'stick', 'sticky note', 'stone', 'stool', 'stove', 'straw', 'stretcher', 'sun', 'sunglass', 'sunshade', 'surveillance camera', 'swan', 'sweeper', 'swim ring', 'swimming pool', 'swing', 'switch', 'table', 'tableware', 'tank', 'tap', 'tape', 'tarp', 'telephone', 'telephone booth', 'tent', 'tire', 'toaster', 'toilet', 'tong', 'tool', 'toothbrush', 'towel', 'toy', 'toy car', 'track', 'train', 'trampoline', 'trash bin', 'tray', 'tree', 'tricycle', 'tripod', 'trophy', 'truck', 'tube', 'turtle', 'tv monitor', 'tweezers', 'typewriter', 'umbrella', 'unknown', 'vacuum cleaner', 'vending machine', 'video camera', 'video game console', 'video player', 'video tape', 'violin', 'wakeboard', 'wall', 'wallet', 'wardrobe', 'washing machine', 'watch', 'water', 'water dispenser', 'water pipe', 'water skate board', 'watermelon', 'whale', 'wharf', 'wheel', 'wheelchair', 'window', 'window blinds', 'wineglass', 'wire', 'wood', 'wool']
bg_classes = ['building', 'ground', 'grass', 'tree', 'sky']
mickey_classes = ['Mickey Mouse', 'Donald Duck'] + bg_classes
batman_classes = ['Batman', 'Joker'] + bg_classes
mario_classes = ['Mario', 'Luigi'] + bg_classes
gates_classes = ['Bill Gates', 'Steve Jobs'] + bg_classes
cityscapes_no_person_classes = ["road", "sidewalk", "building", "wall", "fence", "pole", "traffic light", "traffic sign", "vegetation", "terrain", "sky", "rider", "car", "truck", "bus", "train", "motorcycle", "bicycle"]
batman_ext_classes = ['Batman', 'Joker', 'James Gordon', 'The Penguin', 'Robin', 'Alfred Pennyworth', 'Catwoman', 'Harley Quinn'] + cityscapes_no_person_classes
sports_classes = ['baseball player', 'basketball player', 'soccer player', 'football player', 'person', 'background', 'wall', 'building', 'sky', 'grass', 'tree', 'ground', 'floor', 'baseball court', 'basketball court', 'soccer court', 'football court']
car_brands_classes = ['Bugatti', 'Cadillac', 'Porsche', 'Lamborghini', 'road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', 'person', 'rider', 'truck', 'bus', 'train', 'motorcycle', 'bicycle', 'background']
blur_classes = ['very blurry car', 'car', 'road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', 'person', 'rider', 'truck', 'bus', 'train', 'motorcycle', 'bicycle']
car_color_classes = ['white car', 'blue car', 'red car', 'black car', 'green car', 'yellow car', 'road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', 'person', 'rider', 'truck', 'bus', 'train', 'motorcycle', 'bicycle']
prompt_templates = [
'a bad photo of a {}.', 'a photo of many {}.', 'a sculpture of a {}.', 'a photo of the hard to see {}.', 'a low resolution photo of the {}.', 'a rendering of a {}.', 'graffiti of a {}.', 'a bad photo of the {}.', 'a cropped photo of the {}.', 'a tattoo of a {}.', 'the embroidered {}.', 'a photo of a hard to see {}.', 'a bright photo of a {}.', 'a photo of a clean {}.', 'a photo of a dirty {}.', 'a dark photo of the {}.', 'a drawing of a {}.', 'a photo of my {}.', 'the plastic {}.', 'a photo of the cool {}.', 'a close-up photo of a {}.', 'a black and white photo of the {}.', 'a painting of the {}.', 'a painting of a {}.', 'a pixelated photo of the {}.', 'a sculpture of the {}.', 'a bright photo of the {}.', 'a cropped photo of a {}.', 'a plastic {}.', 'a photo of the dirty {}.', 'a jpeg corrupted photo of a {}.', 'a blurry photo of the {}.', 'a photo of the {}.', 'a good photo of the {}.', 'a rendering of the {}.', 'a {} in a video game.', 'a photo of one {}.', 'a doodle of a {}.', 'a close-up photo of the {}.', 'a photo of a {}.', 'the origami {}.', 'the {} in a video game.', 'a sketch of a {}.', 'a doodle of the {}.', 'a origami {}.', 'a low resolution photo of a {}.', 'the toy {}.', 'a rendition of the {}.', 'a photo of the clean {}.', 'a photo of a large {}.', 'a rendition of a {}.', 'a photo of a nice {}.', 'a photo of a weird {}.', 'a blurry photo of a {}.', 'a cartoon {}.', 'art of a {}.', 'a sketch of the {}.', 'a embroidered {}.', 'a pixelated photo of a {}.', 'itap of the {}.', 'a jpeg corrupted photo of the {}.', 'a good photo of a {}.', 'a plushie {}.', 'a photo of the nice {}.', 'a photo of the small {}.', 'a photo of the weird {}.', 'the cartoon {}.', 'art of the {}.', 'a drawing of the {}.', 'a photo of the large {}.', 'a black and white photo of a {}.', 'the plushie {}.', 'a dark photo of a {}.', 'itap of a {}.', 'graffiti of the {}.', 'a toy {}.', 'itap of my {}.', 'a photo of a cool {}.', 'a photo of a small {}.', 'a tattoo of the {}.', 'there is a {} in the scene.', 'there is the {} in the scene.', 'this is a {} in the scene.', 'this is the {} in the scene.', 'this is one {} in the scene.',
]
def parse_args():
    parser = argparse.ArgumentParser(description='Prompt engineering script')
parser.add_argument('--model', default='RN50', choices=['RN50', 'RN101', 'RN50x4', 'RN50x16', 'ViT32', 'ViT16'], help='clip model name')
parser.add_argument('--class-set', default=['voc'], nargs='+',
choices=['kitti', 'nuscenes', 'scannet', 'city', 'ade', 'stuff', 'voc', 'context', 'acontext', 'mickey', 'batman', 'mario', 'gates', 'blur', 'sports', 'car_brands', 'batman_ext', 'car_color'],
help='the set of class names')
parser.add_argument('--no-prompt-eng', action='store_true', help='disable prompt engineering')
args = parser.parse_args()
return args
def zeroshot_classifier(model_name, classnames, templates):
model, preprocess = clip.load(model_name)
with torch.no_grad():
zeroshot_weights = []
for classname in classnames:
texts = [template.format(classname) for template in templates] #format with class
texts = clip.tokenize(texts).cuda() #tokenize
class_embeddings = model.encode_text(texts) #embed with text encoder
class_embeddings /= class_embeddings.norm(dim=-1, keepdim=True)
class_embedding = class_embeddings.mean(dim=0)
class_embedding /= class_embedding.norm()
zeroshot_weights.append(class_embedding)
zeroshot_weights = torch.stack(zeroshot_weights, dim=1).cuda()
return zeroshot_weights
if __name__ == '__main__':
args = parse_args()
classes = []
all_set_name = ''
name_mapping = {'kitti': kitti_classes, 'nuscenes': nuscenes_classes, 'scannet': scannet_classes, 'city': cityscapes_classes, 'ade': ade20k_classes, 'stuff': coco_stuff_classes, 'voc': voc_classes, 'context': pascal_context_classes, 'acontext': all_pascal_context_classes, 'mickey': mickey_classes, 'batman': batman_classes, 'mario': mario_classes, 'gates': gates_classes, 'blur': blur_classes, 'sports': sports_classes, 'car_brands': car_brands_classes, 'batman_ext': batman_ext_classes, 'car_color': car_color_classes}
for set_name in args.class_set:
if set_name in name_mapping:
classes += name_mapping[set_name]
all_set_name += '_{}'.format(set_name)
if set_name in ['blur'] or args.no_prompt_eng:
prompt_templates = ['a photo of a {}.']
# remove redundant classes
classes = list(dict.fromkeys(classes))
# remove the first underline
all_set_name = all_set_name[1:]
print(classes)
print(f"{len(classes)} class(es), {len(prompt_templates)} template(s)")
# ['RN50', 'RN101', 'RN50x4', 'RN50x16', 'ViT-B/32', 'ViT-B/16']
name_mapping = {'RN50': 'RN50', 'RN101': 'RN101', 'RN50x4': 'RN50x4', 'RN50x16': 'RN50x16', 'ViT32': 'ViT-B/32', 'ViT16': 'ViT-B/16'}
zeroshot_weights = zeroshot_classifier(name_mapping[args.model], classes, prompt_templates)
zeroshot_weights = zeroshot_weights.permute(1, 0).float()
print(zeroshot_weights.shape)
prefix = f'{all_set_name}_{args.model}'
if args.no_prompt_eng:
prefix += '_npe'
torch.save(zeroshot_weights, f'{prefix}_clip_text.pth')
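    # How the saved embeddings are typically consumed downstream (sketch, not run here):
    # visual features of dimension D, one per point or pixel, are compared against the
    # (num_classes, D) text matrix by cosine similarity and the argmax gives the label.
    # text_feats = torch.load(f'{prefix}_clip_text.pth')              # (num_classes, D), already L2-normalised
    # vis_feats = vis_feats / vis_feats.norm(dim=-1, keepdim=True)    # (N, D), hypothetical visual features
    # logits = vis_feats @ text_feats.t()                             # (N, num_classes)
    # pred = logits.argmax(dim=1)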
| 17,422 | 171.50495 | 5,180 |
py
|
CLIP2Scene
|
CLIP2Scene-main/utils/scannet_utils.py
|
import os, sys
import csv
import json
try:
import numpy as np
except ImportError:
print("Failed to import numpy package.")
sys.exit(-1)
try:
import imageio
except ImportError:
print("Please install the module 'imageio' for image processing, e.g.")
print("pip install imageio")
sys.exit(-1)
#g_label_names = ['unannotated', 'wall', 'floor', 'chair', 'table', 'desk', 'bed', 'bookshelf', 'sofa', 'sink', 'bathtub', 'toilet', 'curtain', 'counter', 'door', 'window', 'shower curtain', 'refridgerator', 'picture', 'cabinet', 'otherfurniture']
# nyu40 label (1~40), 0 for unannotated, 41 for unknown
# only evaluate 20 classes in nyu40
CLASS_LABELS_scannet = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'other furniture']
CLASS_LABELS_S3DIS = ['ceiling', 'floor', 'wall', 'beam', 'column', 'window', 'door', 'table', 'chair', 'sofa', 'bookshelf', 'board', 'clutter']
VALID_CLASS_IDS = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]
#UNKNOWN_ID = np.max(VALID_CLASS_IDS) + 1 # scannet github
UNKNOWN_ID = 41
UNANNOTATE_ID = 0
# only evaluate 20 classes in nyu40
# map nyu40 to 1~21, 0 for unannotated and unknown
g_label_names = ['unannotate'] + CLASS_LABELS_scannet
g_label_ids = [UNANNOTATE_ID] + VALID_CLASS_IDS
# if string s represents an int
def represents_int(s):
try:
int(s)
return True
except ValueError:
return False
def read_aggregation(filename):
assert os.path.isfile(filename)
object_id_to_segs = {}
label_to_segs = {}
with open(filename) as f:
data = json.load(f)
num_objects = len(data['segGroups'])
for i in range(num_objects):
object_id = data['segGroups'][i]['objectId'] + 1 # instance ids should be 1-indexed
label = data['segGroups'][i]['label']
segs = data['segGroups'][i]['segments']
object_id_to_segs[object_id] = segs
if label in label_to_segs:
label_to_segs[label].extend(segs)
else:
label_to_segs[label] = segs
return object_id_to_segs, label_to_segs
def read_segmentation(filename):
assert os.path.isfile(filename)
seg_to_verts = {}
with open(filename) as f:
data = json.load(f)
num_verts = len(data['segIndices'])
for i in range(num_verts):
seg_id = data['segIndices'][i]
if seg_id in seg_to_verts:
seg_to_verts[seg_id].append(i)
else:
seg_to_verts[seg_id] = [i]
return seg_to_verts, num_verts
def read_label_mapping(filename, label_from='raw_category', label_to='nyu40id'):
assert os.path.isfile(filename)
mapping = dict()
with open(filename) as csvfile:
reader = csv.DictReader(csvfile, delimiter='\t')
for row in reader:
mapping[row[label_from]] = int(row[label_to])
# if ints convert
if represents_int(list(mapping.keys())[0]):
mapping = {int(k):v for k,v in mapping.items()}
return mapping
# input: scene_types.txt or scene_types_all.txt
def read_scene_types_mapping(filename, remove_spaces=True):
assert os.path.isfile(filename)
mapping = dict()
lines = open(filename).read().splitlines()
lines = [line.split('\t') for line in lines]
if remove_spaces:
mapping = { x[1].strip():int(x[0]) for x in lines }
else:
mapping = { x[1]:int(x[0]) for x in lines }
return mapping
# color by label
def visualize_label_image(filename, image):
height = image.shape[0]
width = image.shape[1]
vis_image = np.zeros([height, width, 3], dtype=np.uint8)
color_palette = create_color_palette()
for idx, color in enumerate(color_palette):
vis_image[image==idx] = color
imageio.imwrite(filename, vis_image)
# color by different instances (mod length of color palette)
def visualize_instance_image(filename, image):
height = image.shape[0]
width = image.shape[1]
vis_image = np.zeros([height, width, 3], dtype=np.uint8)
color_palette = create_color_palette()
instances = np.unique(image)
for idx, inst in enumerate(instances):
vis_image[image==inst] = color_palette[inst%len(color_palette)]
imageio.imwrite(filename, vis_image)
def create_color_palette():
return [
(174, 199, 232), # wall
(152, 223, 138), # floor
(31, 119, 180), # cabinet
(255, 187, 120), # bed
(188, 189, 34), # chair
(140, 86, 75), # sofa
(255, 152, 150), # table
(214, 39, 40), # door
(197, 176, 213), # window
(148, 103, 189), # bookshelf
(196, 156, 148), # picture
(23, 190, 207), # counter
(247, 182, 210), # desk
(219, 219, 141), # curtain
(255, 127, 14), # refrigerator
(158, 218, 229), # shower curtain
(44, 160, 44), # toilet
(112, 128, 144), # sink
(227, 119, 194), # bathtub
(82, 84, 163), # otherfurn
(0, 0, 0),
]
# color palette for nyu40 labels
# def create_color_palette():
# return [
# (0, 0, 0),
# (174, 199, 232), # wall
# (152, 223, 138), # floor
# (31, 119, 180), # cabinet
# (255, 187, 120), # bed
# (188, 189, 34), # chair
# (140, 86, 75), # sofa
# (255, 152, 150), # table
# (214, 39, 40), # door
# (197, 176, 213), # window
# (148, 103, 189), # bookshelf
# (196, 156, 148), # picture
# (23, 190, 207), # counter
# (178, 76, 76),
# (247, 182, 210), # desk
# (66, 188, 102),
# (219, 219, 141), # curtain
# (140, 57, 197),
# (202, 185, 52),
# (51, 176, 203),
# (200, 54, 131),
# (92, 193, 61),
# (78, 71, 183),
# (172, 114, 82),
# (255, 127, 14), # refrigerator
# (91, 163, 138),
# (153, 98, 156),
# (140, 153, 101),
# (158, 218, 229), # shower curtain
# (100, 125, 154),
# (178, 127, 135),
# (120, 185, 128),
# (146, 111, 194),
# (44, 160, 44), # toilet
# (112, 128, 144), # sink
# (96, 207, 209),
# (227, 119, 194), # bathtub
# (213, 92, 176),
# (94, 106, 211),
# (82, 84, 163), # otherfurn
# (100, 85, 144)
# ]
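# Typical use of the helpers above (sketch; the file names are the standard ScanNet ones
# and are only assumed to be present next to the script):
# label_map = read_label_mapping('scannetv2-labels.combined.tsv',
#                                label_from='raw_category', label_to='nyu40id')
# object_id_to_segs, label_to_segs = read_aggregation('scene0000_00.aggregation.json')
# seg_to_verts, num_verts = read_segmentation('scene0000_00_vh_clean_2.0.010000.segs.json')
# Raw category labels are first mapped to nyu40 ids via label_map; only the 20 ids in
# VALID_CLASS_IDS are kept for evaluation, everything else counts as unannotated.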
| 6,494 | 32.828125 | 247 |
py
|
CLIP2Scene
|
CLIP2Scene-main/utils/metrics.py
|
import torch
def confusion_matrix(preds, labels, num_classes):
hist = (
torch.bincount(
num_classes * labels + preds,
minlength=num_classes ** 2,
)
.reshape(num_classes, num_classes)
.float()
)
return hist
def compute_IoU_from_cmatrix(hist, ignore_index=None):
"""Computes the Intersection over Union (IoU).
Args:
hist: confusion matrix.
Returns:
m_IoU, fw_IoU, and matrix IoU
"""
if ignore_index is not None:
hist[ignore_index] = 0.0
intersection = torch.diag(hist)
union = hist.sum(dim=1) + hist.sum(dim=0) - intersection
IoU = intersection.float() / union.float()
IoU[union == 0] = 1.0
if ignore_index is not None:
IoU = torch.cat((IoU[:ignore_index], IoU[ignore_index+1:]))
m_IoU = torch.mean(IoU).item()
fw_IoU = (
torch.sum(intersection) / (2 * torch.sum(hist) - torch.sum(intersection))
).item()
return m_IoU, fw_IoU, IoU
def compute_IoU(preds, labels, num_classes, ignore_index=None):
"""Computes the Intersection over Union (IoU)."""
hist = confusion_matrix(preds, labels, num_classes)
return compute_IoU_from_cmatrix(hist, ignore_index)
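# Minimal usage sketch with dummy predictions (3 classes, class 0 treated as ignore):
if __name__ == "__main__":
    preds = torch.tensor([0, 1, 2, 2, 1])
    labels = torch.tensor([0, 1, 2, 1, 1])
    m_IoU, fw_IoU, per_class_IoU = compute_IoU(preds, labels, num_classes=3, ignore_index=0)
    print(m_IoU, fw_IoU, per_class_IoU)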
| 1,229 | 28.285714 | 81 |
py
|
CLIP2Scene
|
CLIP2Scene-main/utils/pc_utils.py
|
""" Utility functions for processing point clouds.
Author: Charles R. Qi, Hao Su
Date: November 2016
"""
import os
import sys
import warnings
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
# Draw point cloud
from eulerangles import euler2mat
import math
# Point cloud IO
import numpy as np
from plyfile import PlyData, PlyElement
import torch
import random
# ----------------------------------------
# Point Cloud/Volume Conversions
# ----------------------------------------
def point_cloud_to_volume_batch(point_clouds, vsize=12, radius=1.0, flatten=True):
""" Input is BxNx3 batch of point cloud
Output is Bx(vsize^3)
"""
vol_list = []
for b in range(point_clouds.shape[0]):
vol = point_cloud_to_volume(np.squeeze(point_clouds[b,:,:]), vsize, radius)
if flatten:
vol_list.append(vol.flatten())
else:
vol_list.append(np.expand_dims(np.expand_dims(vol, -1), 0))
if flatten:
return np.vstack(vol_list)
else:
return np.concatenate(vol_list, 0)
def point_cloud_to_volume(points, vsize, radius=1.0):
""" input is Nx3 points.
output is vsize*vsize*vsize
assumes points are in range [-radius, radius]
"""
vol = np.zeros((vsize,vsize,vsize))
voxel = 2*radius/float(vsize)
locations = (points + radius)/voxel
locations = locations.astype(int)
vol[locations[:,0],locations[:,1],locations[:,2]] = 1.0
return vol
#a = np.zeros((16,1024,3))
#print point_cloud_to_volume_batch(a, 12, 1.0, False).shape
def volume_to_point_cloud(vol):
""" vol is occupancy grid (value = 0 or 1) of size vsize*vsize*vsize
return Nx3 numpy array.
"""
vsize = vol.shape[0]
    assert(vol.shape[1] == vsize and vol.shape[2] == vsize)
points = []
for a in range(vsize):
for b in range(vsize):
for c in range(vsize):
if vol[a,b,c] == 1:
points.append(np.array([a,b,c]))
if len(points) == 0:
return np.zeros((0,3))
points = np.vstack(points)
return points
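# Round-trip sketch: voxelise a random cloud in [-1, 1]^3 and read the occupied voxels back
# (the recovered points are voxel indices, not the original coordinates):
# pts = np.random.uniform(-0.99, 0.99, size=(1024, 3))
# vol = point_cloud_to_volume(pts, vsize=12, radius=1.0)
# occ = volume_to_point_cloud(vol)  # Mx3 array of occupied voxel indices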
def point_cloud_to_volume_v2_batch(point_clouds, vsize=12, radius=1.0, num_sample=128):
""" Input is BxNx3 a batch of point cloud
Output is BxVxVxVxnum_samplex3
Added on Feb 19
"""
vol_list = []
for b in range(point_clouds.shape[0]):
vol = point_cloud_to_volume_v2(point_clouds[b,:,:], vsize, radius, num_sample)
vol_list.append(np.expand_dims(vol, 0))
return np.concatenate(vol_list, 0)
def point_cloud_to_volume_v2(points, vsize, radius=1.0, num_sample=128):
""" input is Nx3 points
output is vsize*vsize*vsize*num_sample*3
assumes points are in range [-radius, radius]
samples num_sample points in each voxel, if there are less than
num_sample points, replicate the points
Added on Feb 19
"""
vol = np.zeros((vsize,vsize,vsize,num_sample,3))
voxel = 2*radius/float(vsize)
locations = (points + radius)/voxel
locations = locations.astype(int)
loc2pc = {}
for n in range(points.shape[0]):
loc = tuple(locations[n,:])
if loc not in loc2pc:
loc2pc[loc] = []
loc2pc[loc].append(points[n,:])
#print loc2pc
for i in range(vsize):
for j in range(vsize):
for k in range(vsize):
if (i,j,k) not in loc2pc:
vol[i,j,k,:,:] = np.zeros((num_sample,3))
else:
pc = loc2pc[(i,j,k)] # a list of (3,) arrays
pc = np.vstack(pc) # kx3
# Sample/pad to num_sample points
if pc.shape[0]>num_sample:
choices = np.random.choice(pc.shape[0], num_sample, replace=False)
pc = pc[choices,:]
elif pc.shape[0]<num_sample:
pc = np.lib.pad(pc, ((0,num_sample-pc.shape[0]),(0,0)), 'edge')
# Normalize
pc_center = (np.array([i,j,k])+0.5)*voxel - radius
#print 'pc center: ', pc_center
pc = (pc - pc_center) / voxel # shift and scale
vol[i,j,k,:,:] = pc
#print (i,j,k), vol[i,j,k,:,:]
return vol
def point_cloud_to_image_batch(point_clouds, imgsize, radius=1.0, num_sample=128):
""" Input is BxNx3 a batch of point cloud
Output is BxIxIxnum_samplex3
Added on Feb 19
"""
img_list = []
for b in range(point_clouds.shape[0]):
img = point_cloud_to_image(point_clouds[b,:,:], imgsize, radius, num_sample)
img_list.append(np.expand_dims(img, 0))
return np.concatenate(img_list, 0)
def point_cloud_to_image(points, imgsize, radius=1.0, num_sample=128):
""" input is Nx3 points
output is imgsize*imgsize*num_sample*3
assumes points are in range [-radius, radius]
samples num_sample points in each pixel, if there are less than
num_sample points, replicate the points
Added on Feb 19
"""
img = np.zeros((imgsize, imgsize, num_sample, 3))
pixel = 2*radius/float(imgsize)
locations = (points[:,0:2] + radius)/pixel # Nx2
locations = locations.astype(int)
loc2pc = {}
for n in range(points.shape[0]):
loc = tuple(locations[n,:])
if loc not in loc2pc:
loc2pc[loc] = []
loc2pc[loc].append(points[n,:])
for i in range(imgsize):
for j in range(imgsize):
if (i,j) not in loc2pc:
img[i,j,:,:] = np.zeros((num_sample,3))
else:
pc = loc2pc[(i,j)]
pc = np.vstack(pc)
if pc.shape[0]>num_sample:
choices = np.random.choice(pc.shape[0], num_sample, replace=False)
pc = pc[choices,:]
elif pc.shape[0]<num_sample:
pc = np.lib.pad(pc, ((0,num_sample-pc.shape[0]),(0,0)), 'edge')
pc_center = (np.array([i,j])+0.5)*pixel - radius
pc[:,0:2] = (pc[:,0:2] - pc_center)/pixel
img[i,j,:,:] = pc
return img
def surface_normal_area(face, vertex):
normals = list()
areas = list()
vertex_to_face = [[] for i in range(len(vertex))]
for fid, f in enumerate(face):
f = f[0]
va, vb, vc = f[0], f[1], f[2]
vertex_to_face[va].append(fid)
vertex_to_face[vb].append(fid)
vertex_to_face[vc].append(fid)
a = vertex[vb] - vertex[va]
b = vertex[vc] - vertex[va]
normal = np.cross(a, b)
area = np.dot(normal, normal) / 2.0
normalized_normal = normal / np.linalg.norm(normal)
normals.append(normalized_normal)
areas.append(area)
return np.array(normals), np.array(areas), vertex_to_face
def vertex_normal(vertex_to_face, normal, areas):
vertex_normals = list()
num_vertex = len(vertex_to_face)
for vid in range(num_vertex):
adj_faces = vertex_to_face[vid]
        if len(adj_faces)==0: # isolated vertex with no adjacent faces
vertex_normals.append([0,0,1])
continue
adj_faces_area = np.expand_dims(np.array(areas[adj_faces]), axis=-1)
adj_faces_normal = np.array(normal[adj_faces])
avg_normal = (adj_faces_normal * adj_faces_area) / np.sum(adj_faces_area)
avg_normal = np.sum(avg_normal, axis=0)
normalized_normal = avg_normal / np.linalg.norm(avg_normal)
#if np.isclose(np.linalg.norm(avg_normal), 0.0):
# print('-------------------')
# print(len(adj_faces))
# print('-------------------')
# print('-------------------')
# print(adj_faces_area.shape, adj_faces_normal.shape, adj_faces_area, adj_faces_normal)
# print(adj_faces_normal * adj_faces_area)
# print(np.sum(adj_faces_area))
# print((adj_faces_normal * adj_faces_area) / np.sum(adj_faces_area))
# print(avg_normal, np.linalg.norm(avg_normal), adj_faces_area, adj_faces_normal)
# print('-------------------')
vertex_normals.append(normalized_normal)
return np.array(vertex_normals)
# ----------------------------------------
# Point cloud IO
# ----------------------------------------
def read_ply(filename):
""" read XYZ point cloud from filename PLY file """
plydata = PlyData.read(filename)
pc = plydata['vertex'].data
pc_array = np.array([[x, y, z] for x,y,z in pc])
return pc_array
def read_ply_rgba(filename):
""" read XYZRGBA point cloud from filename PLY file """
plydata = PlyData.read(filename)
pc = plydata['vertex'].data
pc_array = np.array([[x, y, z,r,g,b,a] for x,y,z,r,g,b,a in pc])
return pc_array
def read_ply_rgba_normal(filename):
""" read XYZRGBA and NxNyNz point cloud from filename PLY file """
plydata = PlyData.read(filename)
pc = plydata['vertex'].data
pc_array = np.array([[x, y, z,r,g,b,a] for x,y,z,r,g,b,a in pc])
face = plydata['face'].data
f_n, f_a, v_f = surface_normal_area(face, pc_array[:, 0:3])
v_n = vertex_normal(v_f, f_n, f_a)
pc_array = np.concatenate((pc_array, v_n), axis=-1)
return pc_array
def write_ply(points, filename, text=True):
""" input: Nx3, write points to filename as PLY format. """
points = [(points[i,0], points[i,1], points[i,2]) for i in range(points.shape[0])]
vertex = np.array(points, dtype=[('x', 'f4'), ('y', 'f4'),('z', 'f4')])
el = PlyElement.describe(vertex, 'vertex', comments=['vertices'])
PlyData([el], text=text).write(filename)
def write_ply_rgb(points, colors, filename, text=True):
""" input: Nx3, Nx3 write points and colors to filename as PLY format. """
num_points = len(points)
assert len(colors) == num_points
points = [(points[i,0], points[i,1], points[i,2]) for i in range(points.shape[0])]
colors = [(colors[i,0], colors[i,1], colors[i,2]) for i in range(colors.shape[0])]
vertex = np.array(points, dtype=[('x', 'f4'), ('y', 'f4'),('z', 'f4')])
color = np.array(colors, dtype=[('red', 'u1'), ('green', 'u1'),('blue', 'u1')])
vertex_all = np.empty(num_points, vertex.dtype.descr + color.dtype.descr)
for prop in vertex.dtype.names:
vertex_all[prop] = vertex[prop]
for prop in color.dtype.names:
vertex_all[prop] = color[prop]
el = PlyElement.describe(vertex_all, 'vertex', comments=['vertices'])
PlyData([el], text=text).write(filename)
def write_ply_rgb_normal(points, colors, normals, filename, text=True):
""" input: Nx3, Nx3, Nx3 write points and colors to filename as PLY format. """
num_points = len(points)
assert len(colors) == num_points
points = [(points[i,0], points[i,1], points[i,2]) for i in range(points.shape[0])]
colors = [(colors[i,0], colors[i,1], colors[i,2]) for i in range(colors.shape[0])]
normals = [(normals[i,0], normals[i,1], normals[i,2]) for i in range(normals.shape[0])]
vertex = np.array(points, dtype=[('x', 'f4'), ('y', 'f4'),('z', 'f4')])
color = np.array(colors, dtype=[('red', 'u1'), ('green', 'u1'),('blue', 'u1')])
normal = np.array(normals, dtype=[('nx', 'f4'), ('ny', 'f4'),('nz', 'f4')])
vertex_all = np.empty(num_points, vertex.dtype.descr + color.dtype.descr + normal.dtype.descr)
for prop in vertex.dtype.names:
vertex_all[prop] = vertex[prop]
for prop in color.dtype.names:
vertex_all[prop] = color[prop]
for prop in normal.dtype.names:
vertex_all[prop] = normal[prop]
el = PlyElement.describe(vertex_all, 'vertex', comments=['vertices'])
PlyData([el], text=text).write(filename)
# ----------------------------------------
# Simple Point cloud and Volume Renderers
# ----------------------------------------
def draw_point_cloud(input_points, canvasSize=500, space=200, diameter=25,
xrot=0, yrot=0, zrot=0, switch_xyz=[0,1,2], normalize=True):
""" Render point cloud to image with alpha channel.
Input:
points: Nx3 numpy array (+y is up direction)
Output:
gray image as numpy array of size canvasSizexcanvasSize
"""
image = np.zeros((canvasSize, canvasSize))
if input_points is None or input_points.shape[0] == 0:
return image
points = input_points[:, switch_xyz]
M = euler2mat(zrot, yrot, xrot)
points = (np.dot(M, points.transpose())).transpose()
# Normalize the point cloud
# We normalize scale to fit points in a unit sphere
if normalize:
centroid = np.mean(points, axis=0)
points -= centroid
furthest_distance = np.max(np.sqrt(np.sum(abs(points)**2,axis=-1)))
points /= furthest_distance
# Pre-compute the Gaussian disk
radius = (diameter-1)/2.0
disk = np.zeros((diameter, diameter))
for i in range(diameter):
for j in range(diameter):
if (i - radius) * (i-radius) + (j-radius) * (j-radius) <= radius * radius:
disk[i, j] = np.exp((-(i-radius)**2 - (j-radius)**2)/(radius**2))
mask = np.argwhere(disk > 0)
dx = mask[:, 0]
dy = mask[:, 1]
dv = disk[disk > 0]
# Order points by z-buffer
zorder = np.argsort(points[:, 2])
points = points[zorder, :]
points[:, 2] = (points[:, 2] - np.min(points[:, 2])) / (np.max(points[:, 2] - np.min(points[:, 2])))
max_depth = np.max(points[:, 2])
for i in range(points.shape[0]):
j = points.shape[0] - i - 1
x = points[j, 0]
y = points[j, 1]
xc = canvasSize/2 + (x*space)
yc = canvasSize/2 + (y*space)
xc = int(np.round(xc))
yc = int(np.round(yc))
px = dx + xc
py = dy + yc
image[px, py] = image[px, py] * 0.7 + dv * (max_depth - points[j, 2]) * 0.3
image = image / np.max(image)
return image
def point_cloud_three_views(points):
""" input points Nx3 numpy array (+y is up direction).
return an numpy array gray image of size 500x1500. """
# +y is up direction
# xrot is azimuth
# yrot is in-plane
# zrot is elevation
img1 = draw_point_cloud(points, zrot=110/180.0*np.pi, xrot=45/180.0*np.pi, yrot=0/180.0*np.pi)
img2 = draw_point_cloud(points, zrot=70/180.0*np.pi, xrot=135/180.0*np.pi, yrot=0/180.0*np.pi)
img3 = draw_point_cloud(points, zrot=180.0/180.0*np.pi, xrot=90/180.0*np.pi, yrot=0/180.0*np.pi)
image_large = np.concatenate([img1, img2, img3], 1)
return image_large
def point_cloud_three_views_demo():
""" Demo for draw_point_cloud function """
from PIL import Image
points = read_ply('../third_party/mesh_sampling/piano.ply')
im_array = point_cloud_three_views(points)
img = Image.fromarray(np.uint8(im_array*255.0))
img.save('piano.jpg')
if __name__=="__main__":
point_cloud_three_views_demo()
def pyplot_draw_point_cloud(points, output_filename):
""" points is a Nx3 numpy array """
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(points[:,0], points[:,1], points[:,2])
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
#savefig(output_filename)
def pyplot_draw_volume(vol, output_filename):
""" vol is of size vsize*vsize*vsize
output an image to output_filename
"""
points = volume_to_point_cloud(vol)
pyplot_draw_point_cloud(points, output_filename)
def write_ply_color(points, labels, out_filename, num_classes=None, colors=None):
""" Color (N,3) points with labels (N) within range 0 ~ num_classes-1 as OBJ file """
import matplotlib.pyplot as pyplot
labels = labels.astype(int)
N = points.shape[0]
if num_classes is None:
num_classes = np.max(labels)+1
print(num_classes)
else:
assert(num_classes>np.max(labels))
if colors is None:
#colors = [pyplot.cm.hsv(i/float(num_classes)) for i in range(num_classes)]
colors = [pyplot.cm.jet(i/float(num_classes)) for i in range(num_classes)]
fout = open(out_filename, 'w')
for i in range(N):
        c = colors[labels[i]]
        c = [int(x * 255) for x in c]  # colormap returns floats in [0, 1]; OBJ colors expect 0-255 ints
        fout.write('v %f %f %f %d %d %d\n' % (points[i,0],points[i,1],points[i,2],c[0],c[1],c[2]))
fout.close()
def farthest_pts_sampling_abuse(pts, num_samples):
'''
naive method
:param pts: n x 3 ndarray
:param num_samples:
:return: num_samples x 3 ndarray
'''
diff = pts[:, None, :] - pts[None, :, :]
# dis_mat = np.sum(diff * diff, axis=2)
dis_mat = np.linalg.norm(diff, axis=2)
N = num_samples
perm = np.zeros(N, dtype=np.int64)
lambdas = np.zeros(N)
ds = dis_mat[0, :]
for i in range(1, N):
idx = np.argmax(ds)
perm[i] = idx
lambdas[i] = ds[idx]
ds = np.minimum(ds, dis_mat[idx, :])
return pts[perm, :]
def farthest_pts_sampling(coords, num_samples):
    '''
    naive farthest point sampling
    :param coords: n x 3 tensor of point coordinates
    :param num_samples:
    :return: num_samples x 3 ndarray
    '''
    # copy so the sampling does not overwrite the caller's tensor
    pts = coords.numpy().copy()
    point_set = []
    index = random.randint(0, pts.shape[0] - 1)
    point_set.append(pts[index].copy())
    # distance from every point to the current sample set
    ds = np.linalg.norm(pts - pts[index][None, :], axis=1)
    for i in range(1, num_samples):
        # pick the point farthest from everything selected so far
        index = np.argmax(ds)
        point_set.append(pts[index].copy())
        ds = np.minimum(ds, np.linalg.norm(pts - pts[index][None, :], axis=1))
    point_set = np.vstack(point_set)
    return point_set
def random_partition(coords):
# print('1')
mask = torch.ones(coords.size()[0]).numpy()
coords_np = coords.numpy()
sample_num = random.randint(2, 5)
random_index = np.random.randint(coords_np.shape[0], size=sample_num)
sample_points = coords_np[random_index, :]
diff = coords_np[:, None, :] - sample_points[None, :, :]
diff = np.linalg.norm(diff, axis=2)
partitions = np.argmin(diff, axis=1)
filter_ind = random.randint(0, sample_num - 1)
# coords_torch = torch.from_numpy(coords_np[partitions != filter_ind])
coords_torch = coords
mask[partitions == filter_ind] = 0
mask = torch.from_numpy(mask)
# print('4')
# part1 = torch.from_numpy(coords_np[partitions == filter_ind])
# part2 = torch.from_numpy(coords_np[partitions != filter_ind])
return coords_torch, mask
# return part1, part2
def random_rotation(coords):
# scale = torch.eye(3)*random.uniform(0.95, 1.05)
scale_flip = np.eye(3) + np.random.randn(3, 3) * 0.1
scale_flip[0][0] *= np.random.randint(0, 2) * 2 - 1
scale_flip = torch.from_numpy(scale_flip).float()
# scale = torch.eye(3)
theta = random.uniform(0, 2) * math.pi
rotationx = torch.tensor([[math.cos(theta), math.sin(theta), 0],
[-math.sin(theta), math.cos(theta), 0],
[0, 0, 1]]).float()
# rotationy = torch.tensor([[math.cos(theta), 0, math.sin(theta)],
# [0, 1, 0],
# [math.sin(theta), 0, -math.cos(theta)]]).float()
#
# rotationz = torch.tensor([[1, 0, 0],
# [0, math.cos(theta), math.sin(theta)],
# [0, -math.sin(theta), math.cos(theta)]]).float()
m = torch.matmul(scale_flip, rotationx)
coords = torch.matmul(coords.float(), m)
return coords
# def random_rotation(coords):
# return coords
def resize_rotation(coords, item):
scale = 0
if item == 'chair':
scale = torch.eye(3) * 0.8
elif item == 'sofa':
scale = torch.eye(3) * 1.75
elif item == 'table':
scale = torch.eye(3) * 1.65
elif item == 'bookshelf':
scale = torch.eye(3) * 1.7
elif item == 'desk':
scale = torch.eye(3) * 1.25
elif item == 'bed':
scale = torch.eye(3) * 2.1
elif item == 'sink':
scale = torch.eye(3) * 1.05
elif item == 'bathtub':
scale = torch.eye(3) * 1.25
elif item == 'toilet':
scale = torch.eye(3) * 0.65
elif item == 'door':
scale = torch.eye(3) * 1.8
elif item == 'curtain':
scale = torch.eye(3) * 2
else :
scale = torch.eye(3) * random.uniform(0.9, 1.75)
'''
if item == 'chair':
scale = torch.eye(3) * random.uniform(5, 5.5)
elif item == 'bed':
scale = torch.eye(3) * random.uniform(1.4, 1.6)
elif item == 'sofa':
scale = torch.eye(3) * random.uniform(9, 9.5)
elif item == 'table':
scale = torch.eye(3) * random.uniform(8, 8.5)
elif item == 'bookshelf':
scale = torch.eye(3) * random.uniform(1.1, 1.2)
elif item == 'desk':
scale = torch.eye(3) * random.uniform(7, 7.5)
elif item == 'nega_data':
scale = torch.eye(3) * random.uniform(5, 8)
'''
# theta = 0 * math.pi
# rotationx = torch.tensor([[math.cos(theta), math.sin(theta), 0],
# [-math.sin(theta), math.cos(theta), 0],
# [0, 0, 1]]).float()
#
# rotationy = torch.tensor([[math.cos(theta), 0, math.sin(theta)],
# [0, 1, 0],
# [math.sin(theta), 0, -math.cos(theta)]]).float()
# rotationz = torch.tensor([[1, 0, 0],
# [0, math.cos(theta), math.sin(theta)],
# [0, -math.sin(theta), math.cos(theta)]]).float()
# m = torch.matmul(scale, rotationz)
m = scale
coords = torch.matmul(coords.float(), m)
return coords
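# Augmentation usage sketch: random_rotation applies a random z-rotation plus slight
# anisotropic scaling/flip, random_partition keeps the coordinates but returns a mask
# with zeros over one randomly chosen spatial partition.
# coords = torch.rand(2048, 3) * 2 - 1
# coords_aug = random_rotation(coords)
# coords_kept, mask = random_partition(coords_aug)  # mask == 0 marks the dropped partition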
| 21,735 | 35.469799 | 104 |
py
|
CLIP2Scene
|
CLIP2Scene-main/utils/__init__.py
| 0 | 0 | 0 |
py
|
|
CLIP2Scene
|
CLIP2Scene-main/utils/testfiles.py
|
import os
import copy
import torch
import numpy as np
from PIL import Image
# import MinkowskiEngine as ME
from pyquaternion import Quaternion
from torch.utils.data import Dataset
from nuscenes.nuscenes import NuScenes
from nuscenes.utils.geometry_utils import view_points
from nuscenes.utils.splits import create_splits_scenes
from nuscenes.utils.data_classes import LidarPointCloud
from torchsparse.utils.quantize import sparse_quantize
import json
from petrel_client.client import Client
import cv2
CUSTOM_SPLIT = [
"scene-0008", "scene-0009", "scene-0019", "scene-0029", "scene-0032", "scene-0042",
"scene-0045", "scene-0049", "scene-0052", "scene-0054", "scene-0056", "scene-0066",
"scene-0067", "scene-0073", "scene-0131", "scene-0152", "scene-0166", "scene-0168",
"scene-0183", "scene-0190", "scene-0194", "scene-0208", "scene-0210", "scene-0211",
"scene-0241", "scene-0243", "scene-0248", "scene-0259", "scene-0260", "scene-0261",
"scene-0287", "scene-0292", "scene-0297", "scene-0305", "scene-0306", "scene-0350",
"scene-0352", "scene-0358", "scene-0361", "scene-0365", "scene-0368", "scene-0377",
"scene-0388", "scene-0391", "scene-0395", "scene-0413", "scene-0427", "scene-0428",
"scene-0438", "scene-0444", "scene-0452", "scene-0453", "scene-0459", "scene-0463",
"scene-0464", "scene-0475", "scene-0513", "scene-0533", "scene-0544", "scene-0575",
"scene-0587", "scene-0589", "scene-0642", "scene-0652", "scene-0658", "scene-0669",
"scene-0678", "scene-0687", "scene-0701", "scene-0703", "scene-0706", "scene-0710",
"scene-0715", "scene-0726", "scene-0735", "scene-0740", "scene-0758", "scene-0786",
"scene-0790", "scene-0804", "scene-0806", "scene-0847", "scene-0856", "scene-0868",
"scene-0882", "scene-0897", "scene-0899", "scene-0976", "scene-0996", "scene-1012",
"scene-1015", "scene-1016", "scene-1018", "scene-1020", "scene-1024", "scene-1044",
"scene-1058", "scene-1094", "scene-1098", "scene-1107",
]
def minkunet_collate_pair_fn(list_data):
"""
Collate function adapted for creating batches with MinkowskiEngine.
"""
(
coords,
feats,
images,
pairing_points,
pairing_images,
inverse_indexes,
superpixels,
) = list(zip(*list_data))
batch_n_points, batch_n_pairings = [], []
offset = 0
for batch_id in range(len(coords)):
        # Write the batch index into the last coordinate column
coords[batch_id][:, -1] = batch_id
pairing_points[batch_id][:] += offset
pairing_images[batch_id][:, 0] += batch_id * images[0].shape[0]
batch_n_points.append(coords[batch_id].shape[0])
batch_n_pairings.append(pairing_points[batch_id].shape[0])
offset += coords[batch_id].shape[0]
# Concatenate all lists
coords_batch = torch.cat(coords, 0).int()
print(coords_batch.size())
pairing_points = torch.tensor(np.concatenate(pairing_points))
pairing_images = torch.tensor(np.concatenate(pairing_images))
feats_batch = torch.cat(feats, 0).float()
images_batch = torch.cat(images, 0).float()
superpixels_batch = torch.tensor(np.concatenate(superpixels))
return {
"sinput_C": coords_batch,
"sinput_F": feats_batch,
"input_I": images_batch,
"pairing_points": pairing_points,
"pairing_images": pairing_images,
"batch_n_pairings": batch_n_pairings,
"inverse_indexes": inverse_indexes,
"superpixels": superpixels_batch,
}
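# A collate function like this is meant to be handed to a PyTorch DataLoader, e.g.
# (sketch only, not executed in this test file):
# loader = torch.utils.data.DataLoader(
#     NuScenesMatchDataset(shuffle=True),
#     batch_size=4,
#     collate_fn=minkunet_collate_pair_fn,
# )
# Note that this test version of the dataset exposes getitem() rather than __getitem__(),
# so a stock DataLoader would need that method renamed first.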
class NuScenesMatchDataset(Dataset):
"""
Dataset matching a 3D points cloud and an image using projection.
"""
def __init__(
self,
# phase,
# config,
shuffle=False,
cloud_transforms=None,
mixed_transforms=None,
**kwargs,
):
# self.phase = phase
self.shuffle = shuffle
self.cloud_transforms = cloud_transforms
self.mixed_transforms = mixed_transforms
self.cylinder = True
self.voxel_size = 0.1
# self.voxel_size = config["voxel_size"]
# self.cylinder = config["cylindrical_coordinates"]
# self.superpixels_type = config["superpixels_type"]
# self.bilinear_decoder = config["decoder"] == "bilinear"
if "cached_nuscenes" in kwargs:
self.nusc = kwargs["cached_nuscenes"]
else:
self.nusc = NuScenes(
version="v1.0-trainval", dataroot="s3://dataset/nuScenes/", verbose=False
)
# a skip ratio can be used to reduce the dataset size and accelerate experiments
try:
skip_ratio = 1
except KeyError:
skip_ratio = 1
skip_counter = 0
self.dataroot = "s3://liuyouquan/nuScenes" #todo
# self.dataroot = "s3://dataset/nuScenes"
self.client = Client('~/.petreloss.conf')
# print(phase)
# if phase == "train":
# f = open('./list_keyframes_train.json', 'r')
# content = f.read()
# self.list_keyframes = json.loads(content)
#
# f1 = open('./save_dict_train.json', 'r')
# content1 = f1.read()
# self.frames_corrs_info = json.loads(content1)
#
# elif phase == "val":
# f = open('./list_keyframes_val.json', 'r')
# content = f.read()
# self.list_keyframes = json.loads(content)
#
# f1 = open('./save_dict_val.json', 'r')
# content1 = f1.read()
# self.frames_corrs_info = json.loads(content1)
#
# elif phase == "parametrizing":
# with open('./list_keyframes_parametrizing.json', 'r') as f:
# self.list_keyframes = json.load(f)
#
# f1 = open('./save_dict_train.json', 'r')
# content = f1.read()
# self.frames_corrs_info = json.loads(content)
# f1.close()
# # phase_scenes = list(
# # set(create_splits_scenes()["train"]) - set(CUSTOM_SPLIT)
# # )
# elif phase == "verifying":
# phase_scenes = CUSTOM_SPLIT
with open('./list_keyframes_parametrizing.json', 'r') as f:
self.list_keyframes = json.load(f)
f1 = open('./save_dict_train.json', 'r')
content = f1.read()
self.frames_corrs_info = json.loads(content)
f1.close()
# print(data1[key_["LIDAR_TOP"]])
# pcl_path = os.path.join("s3://liuyouquan/nuScenes/", data1[key_["LIDAR_TOP"]][0].replace("samples", ""))
# pcl_path = "s3://liuyouquan/nuScenes/" + data1[key_["LIDAR_TOP"]][0].replace("samples", "")
# f = open('./list_keyframes_parametrizing.json', 'r')
# content = f.read()
# self.list_keyframes = json.loads(content)
#
# f1 = open('./save_dict_parametrizing.json', 'r')
# content1 = f1.read()
# self.frames_corrs_info = json.loads(content1)
# phase_scenes = list(
# print(self.list_keyframes)
# print(type(self.list_keyframes))
# create a list of camera & lidar scans
# for scene_idx in range(len(self.nusc.scene)):
# scene = self.nusc.scene[scene_idx]
# if scene["name"] in phase_scenes:
# skip_counter += 1
# if skip_counter % skip_ratio == 0:
# self.create_list_of_scans(scene)
# def create_list_of_scans(self, scene):
# # Get first and last keyframe in the scene
# current_sample_token = scene["first_sample_token"]
# # Loop to get all successive keyframes
# list_data = []
# while current_sample_token != "":
# current_sample = self.nusc.get("sample", current_sample_token) #TODO
# list_data.append(current_sample["data"])
# current_sample_token = current_sample["next"]
#
# # Add new scans in the list
# self.list_keyframes.extend(list_data)
def map_pointcloud_to_image(self, data, min_dist: float = 1.0):
"""
Given a lidar token and camera sample_data token, load pointcloud and map it to
the image plane. Code adapted from nuscenes-devkit
https://github.com/nutonomy/nuscenes-devkit.
:param min_dist: Distance from the camera below which points are discarded.
"""
# pointsensor = self.nusc.get("sample_data", data["LIDAR_TOP"])
key_ = data["LIDAR_TOP"]
pcl_path = "s3://liuyouquan/nuScenes" + self.frames_corrs_info[key_][0].replace("samples", "")
# print(pcl_path)
# pcl_path = os.path.join("s3://liuyouquan/nuScenes/", self.frames_corrs_info[key_][0].replace("samples",""))
# print(pcl_path)
# try:
# pc_original = LidarPointCloud.from_file(pcl_path)
# # print("pcl_path: ", pcl_path)
# pc_ref = pc_original.points
# except Exception as e:
# print("pcl_path: ", pcl_path)
images = []
superpixels = []
pairing_points = np.empty(0, dtype=np.int64)
pairing_images = np.empty((0, 3), dtype=np.int64)
camera_list = [
"CAM_FRONT",
"CAM_FRONT_RIGHT",
"CAM_BACK_RIGHT",
"CAM_BACK",
"CAM_BACK_LEFT",
"CAM_FRONT_LEFT",
]
if self.shuffle:
np.random.shuffle(camera_list)
tot = 0
camera_info = self.frames_corrs_info[key_][1]
for i, camera_name in enumerate(camera_list):
# pc = copy.deepcopy(pc_original)
# cam = self.nusc.get("sample_data", data[camera_name]) #todo
camera_path = camera_info[camera_name]["camera_name"]
# print(pc_ref.shape)
# import pdb
# pdb.set_trace()
# camera_path = "samples/CAM_FRONT/n008-2018-07-27-12-07-38-0400__CAM_FRONT__1532707811012460.jpg"
try:
img_bytes = self.client.get(self.dataroot + "/" + camera_path, update_cache=True)
assert img_bytes is not None
# print(camera_path)
except Exception as e:
tot += 1
print(camera_path)
continue
            # NOTE: this early return is a debug short-circuit for checking image availability
            # on S3; with it in place (and the point-cloud loading commented out above) the
            # projection code below never runs.
            return tot
# img_bytes = self.client.get("s3://dataset/nuScenes/samples/CAM_FRONT/n015-2018-07-18-11-07-57+0800__CAM_FRONT__1531883530412470.jpg", update_cache=True)
# assert img_bytes is not None
img_mem_view = memoryview(img_bytes)
buffer = np.frombuffer(img_mem_view, np.uint8)
im = cv2.imdecode(buffer, cv2.IMREAD_COLOR)
# cv2.imwrite("ttt.jpg", im)
# im = im.reshape(im_shape)
im = np.array(im)
# import pdb
# pdb.set_trace()
# print(im.shape)
# sp = Image.open(
# f"superpixels/nuscenes/"
# f"superpixels_{self.superpixels_type}/{camera_info[camera_name]['token']}.png"
# )
# superpixels.append(np.array(sp))
# Points live in the point sensor frame. So they need to be transformed via
# global to the image plane.
# First step: transform the pointcloud to the ego vehicle frame for the
# timestamp of the sweep.
# cs_record = self.nusc.get(
# "calibrated_sensor", pointsensor["calibrated_sensor_token"]
# )
cs_record = camera_info[camera_name]["cs_record"]
pc.rotate(Quaternion(cs_record["rotation"]).rotation_matrix)
pc.translate(np.array(cs_record["translation"]))
# Second step: transform from ego to the global frame.
# poserecord = self.nusc.get("ego_pose", pointsensor["ego_pose_token"])
poserecord = camera_info[camera_name]["poserecord"]
pc.rotate(Quaternion(poserecord["rotation"]).rotation_matrix)
pc.translate(np.array(poserecord["translation"]))
# Third step: transform from global into the ego vehicle frame for the
# timestamp of the image.
# poserecord = self.nusc.get("ego_pose", cam["ego_pose_token"])
poserecord = camera_info[camera_name]["poserecord_"]
pc.translate(-np.array(poserecord["translation"]))
pc.rotate(Quaternion(poserecord["rotation"]).rotation_matrix.T)
# Fourth step: transform from ego into the camera.
# cs_record = self.nusc.get(
# "calibrated_sensor", cam["calibrated_sensor_token"]
# )
cs_record = camera_info[camera_name]["cs_record_"]
pc.translate(-np.array(cs_record["translation"]))
pc.rotate(Quaternion(cs_record["rotation"]).rotation_matrix.T)
# Fifth step: actually take a "picture" of the point cloud.
# Grab the depths (camera frame z axis points away from the camera).
depths = pc.points[2, :]
# Take the actual picture
# (matrix multiplication with camera-matrix + renormalization).
points = view_points(
pc.points[:3, :],
np.array(cs_record["camera_intrinsic"]),
normalize=True,
)
# Remove points that are either outside or behind the camera.
# Also make sure points are at least 1m in front of the camera to avoid
# seeing the lidar points on the camera
# casing for non-keyframes which are slightly out of sync.
points = points[:2].T
mask = np.ones(depths.shape[0], dtype=bool)
mask = np.logical_and(mask, depths > min_dist)
mask = np.logical_and(mask, points[:, 0] > 0)
mask = np.logical_and(mask, points[:, 0] < im.shape[1] - 1)
mask = np.logical_and(mask, points[:, 1] > 0)
mask = np.logical_and(mask, points[:, 1] < im.shape[0] - 1)
matching_points = np.where(mask)[0]
#
matching_pixels = np.round(
np.flip(points[matching_points], axis=1)
).astype(np.int64)
images.append(im / 255)
pairing_points = np.concatenate((pairing_points, matching_points))
pairing_images = np.concatenate(
(
pairing_images,
np.concatenate(
(
np.ones((matching_pixels.shape[0], 1), dtype=np.int64) * i,
matching_pixels,
),
axis=1,
),
)
)
# return tot
return pc_ref.T, images, pairing_points, pairing_images
def __len__(self):
return len(self.list_keyframes)
def getitem(self, idx):
# tot = self.map_pointcloud_to_image(self.list_keyframes[idx])
# return tot
(
pc,
images,
pairing_points,
pairing_images,
) = self.map_pointcloud_to_image(self.list_keyframes[idx])
# superpixels = torch.tensor(superpixels)
intensity = torch.tensor(pc[:, 3:])
pc = torch.tensor(pc[:, :3])
# print(images)
# import pdb
# pdb.set_trace()
#
images = torch.tensor(np.array(images, dtype=np.float32).transpose(0, 3, 1, 2))
# if self.cloud_transforms:
# pc = self.cloud_transforms(pc)
# if self.mixed_transforms:
# (
# pc,
# intensity,
# images,
# pairing_points,
# pairing_images,
# superpixels,
# ) = self.mixed_transforms(
# pc, intensity, images, pairing_points, pairing_images
# )
if self.cylinder:
# Transform to cylinder coordinate and scale for voxel size
x, y, z = pc.T
rho = torch.sqrt(x ** 2 + y ** 2) / self.voxel_size
phi = torch.atan2(y, x) * 180 / np.pi # corresponds to a split each 1°
z = z / self.voxel_size
coords_aug = torch.cat((rho[:, None], phi[:, None], z[:, None]), 1)
else:
coords_aug = pc / self.voxel_size
#
# # Voxelization with MinkowskiEngine
discrete_coords, indexes, inverse_indexes = sparse_quantize(
coords_aug.contiguous().numpy(), return_index=True, return_inverse=True
)
discrete_coords = torch.from_numpy(discrete_coords)
indexes = torch.from_numpy(indexes)
inverse_indexes = torch.from_numpy(inverse_indexes)
# # indexes here are the indexes of points kept after the voxelization
pairing_points = inverse_indexes[pairing_points]
#
unique_feats = intensity[indexes]
#
discrete_coords = torch.cat(
(
discrete_coords,
torch.zeros(discrete_coords.shape[0], 1, dtype=torch.int32),
),
1,
)
# return
return (
discrete_coords,
unique_feats,
images,
pairing_points,
pairing_images,
inverse_indexes,
)
Dataset = NuScenesMatchDataset()
print("len: ", len(Dataset))
sum_t = 0
for i in range(len(Dataset)):
# for i in range(100):
print(i)
tot = Dataset.getitem(i)
# sum_t += tot
print("sum_t", sum_t)
| 17,559 | 37.008658 | 166 |
py
|
CLIP2Scene
|
CLIP2Scene-main/utils/preprocess_scannet.py
|
import os
import sys
import time
import argparse
import json
import numpy as np
import multiprocessing as mp
from functools import partial
# sys.path.append("..")
sys.path.append("../utils")
import pc_utils
import scannet_utils
from plyfile import PlyData, PlyElement
g_label_names = scannet_utils.g_label_names
g_label_ids = scannet_utils.g_label_ids
'''
params
'''
parser = argparse.ArgumentParser()
parser.add_argument('--scannet_path', default= os.environ['HOME']+'/dataset/scannet/scans/scans',
help='path to scannet data')
parser.add_argument('--label_map_file', default= os.environ['HOME'] + '/dataset/scannet/scans/scannetv2-labels.combined.tsv',
help='path to scannetv2-labels.combined.tsv (required for label export only)')
parser.add_argument("--num_proc", required=False, type=int, default=28, help="number of parallel process, default is 30")
opt = parser.parse_args()
remapper=np.ones(150)*(-100)
for i,x in enumerate([1,2,3,4,5,6,7,8,9,10,11,12,14,16,24,28,33,34,36,39]):
remapper[x]=i
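# remapper sends the 20 benchmark nyu40 ids listed above to contiguous labels 0..19
# and leaves every other id at the ignore value -100, e.g. remapper[1] == 0,
# remapper[39] == 19, remapper[13] == -100.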
def collect_point_data(scene_name):
# read label mapping file
label_map = scannet_utils.read_label_mapping(opt.label_map_file, label_from='raw_category', label_to='nyu40id')
# Over-segmented segments: maps from segment to vertex/point IDs
data_folder = os.path.join(opt.scannet_path, scene_name)
out_filename = os.path.join(data_folder, scene_name + '_new_semantic.npy') # scene0000_00/scene0000_00.npy
# if os.path.exists(out_filename): return
# Read segmentation label
seg_filename = os.path.join(data_folder, '%s_vh_clean_2.0.010000.segs.json' % (scene_name))
seg_to_verts, num_verts = scannet_utils.read_segmentation(seg_filename)
# Read Instances segmentation label
agg_filename = os.path.join(data_folder, '%s.aggregation.json' % (scene_name))
object_id_to_segs, label_to_segs = scannet_utils.read_aggregation(agg_filename)
# Raw points in XYZRGBA
ply_filename = os.path.join(data_folder, '%s_vh_clean_2.ply' % (scene_name))
label_filename = os.path.join(data_folder, '%s_vh_clean_2.labels.ply' % (scene_name))
points = pc_utils.read_ply_rgba_normal(ply_filename)
# points = pc_utils.read_ply_rgba(ply_filename)
# labels = pc_utils.read_ply_rgba(label_filename)
# plydata = PlyData.read(label_filename)
# pc = plydata['vertex'].data
# pc_array = np.array([[l] for x,y,z,r,g,b,a, l in pc])
# trans_ids = np.array([[g_label_ids.index(l)] for x,y,z,r,g,b,a, l in pc])
plydata = PlyData.read(label_filename)
labels = np.expand_dims(remapper[np.array(plydata.elements[0]['label'])],1)
# trans_ids = g_label_ids.index(pc_array)
# import pdb
# pdb.set_trace()
'''
label_ids = np.zeros(shape=(num_verts), dtype=np.uint32) # 0: unannotated
for label, segs in label_to_segs.items():
# convert scannet raw label to nyu40 label (1~40), 0 for unannotated, 41 for unknown
label_id = label_map[label]
# only evaluate 20 class in nyu40 label
# map nyu40 to 1~21, 0 for unannotated, unknown and not evalutated
if label_id in g_label_ids: # IDS for 20 classes in nyu40 for evaluation (1~21)
eval_label_id = g_label_ids.index(label_id)
else: # IDS unannotated, unknow or not for evaluation go to unannotate label (0)
eval_label_id = g_label_names.index('unannotate')
for seg in segs:
verts = seg_to_verts[seg]
label_ids[verts] = eval_label_id
'''
# for i in range(20):
# print(label_ids[i])
instance_ids = np.zeros(shape=(num_verts), dtype=np.uint32) # 0: unannotated
for object_id, segs in object_id_to_segs.items():
for seg in segs:
verts = seg_to_verts[seg]
instance_ids[verts] = object_id
for i in range(max(instance_ids)):
index = instance_ids == i
min_label = min(labels[index])
max_label = max(labels[index])
if min_label != max_label: print("error")
points = np.delete(points, 6, 1) # only RGB, ignoring A
# label_ids = np.expand_dims(label_ids, 1)
# instance_ids = np.expand_dims(instance_ids, 1)
# print(points.shape, label_ids.shape, instance_ids.shape)
# order is critical, do not change the order
# print("labels data: ", label_ids - labels)
# data = np.concatenate((points, labels, labels), 1)
data = np.concatenate((points, instance_ids, labels), 1)
# data = np.concatenate((points, instance_ids, label_ids), 1)
print(out_filename)
if os.path.exists(out_filename): return
np.save(out_filename, data)
# print(scene_name, ' points shape:', data.shape)
def preprocess_scenes(scene_name):
try:
collect_point_data(scene_name)
print("name: ", scene_name)
except Exception as e:
sys.stderr.write(scene_name + ' ERROR!! ')
sys.stderr.write(str(e) + '\n')
sys.exit(-1)
def main():
scenes = [d for d in os.listdir(opt.scannet_path) if os.path.isdir(os.path.join(opt.scannet_path, d))]
scenes.sort()
# collect_point_data(scenes[10])
# import pdb
# pdb.set_trace()
print(opt.scannet_path)
print('Find %d scenes' % len(scenes))
print('Extract points (Vertex XYZ, RGB, NxNyNz, Label, Instance-label)')
pool = mp.Pool(opt.num_proc)
pool.map(preprocess_scenes, scenes)
if __name__ == '__main__':
main()
| 5,424 | 37.75 | 125 |
py
|
CLIP2Scene
|
CLIP2Scene-main/utils/plyfile.py
|
# Copyright 2014 Darsh Ranjan
#
# This file is part of python-plyfile.
#
# python-plyfile is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# python-plyfile is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-plyfile. If not, see
# <http://www.gnu.org/licenses/>.
from itertools import islice as _islice
import numpy as _np
from sys import byteorder as _byteorder
try:
_range = xrange
except NameError:
_range = range
# Many-many relation
_data_type_relation = [
('int8', 'i1'),
('char', 'i1'),
('uint8', 'u1'),
('uchar', 'b1'),
('uchar', 'u1'),
('int16', 'i2'),
('short', 'i2'),
('uint16', 'u2'),
('ushort', 'u2'),
('int32', 'i4'),
('int', 'i4'),
('uint32', 'u4'),
('uint', 'u4'),
('float32', 'f4'),
('float', 'f4'),
('float64', 'f8'),
('double', 'f8')
]
_data_types = dict(_data_type_relation)
_data_type_reverse = dict((b, a) for (a, b) in _data_type_relation)
_types_list = []
_types_set = set()
for (_a, _b) in _data_type_relation:
if _a not in _types_set:
_types_list.append(_a)
_types_set.add(_a)
if _b not in _types_set:
_types_list.append(_b)
_types_set.add(_b)
_byte_order_map = {
'ascii': '=',
'binary_little_endian': '<',
'binary_big_endian': '>'
}
_byte_order_reverse = {
'<': 'binary_little_endian',
'>': 'binary_big_endian'
}
_native_byte_order = {'little': '<', 'big': '>'}[_byteorder]
def _lookup_type(type_str):
if type_str not in _data_type_reverse:
try:
type_str = _data_types[type_str]
except KeyError:
raise ValueError("field type %r not in %r" %
(type_str, _types_list))
return _data_type_reverse[type_str]
def _split_line(line, n):
fields = line.split(None, n)
if len(fields) == n:
fields.append('')
assert len(fields) == n + 1
return fields
def make2d(array, cols=None, dtype=None):
'''
Make a 2D array from an array of arrays. The `cols' and `dtype'
arguments can be omitted if the array is not empty.
'''
if (cols is None or dtype is None) and not len(array):
raise RuntimeError("cols and dtype must be specified for empty "
"array")
if cols is None:
cols = len(array[0])
if dtype is None:
dtype = array[0].dtype
return _np.fromiter(array, [('_', dtype, (cols,))],
count=len(array))['_']
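# Usage sketch (illustrative values): list properties such as face indices come back
# as a 1-D object array of per-row arrays, which make2d collapses into a matrix.
#   faces = _np.empty(2, dtype=object)
#   faces[0] = _np.array([0, 1, 2], dtype='i4')
#   faces[1] = _np.array([2, 3, 0], dtype='i4')
#   make2d(faces)  # -> array([[0, 1, 2], [2, 3, 0]], dtype=int32)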
class PlyParseError(Exception):
'''
Raised when a PLY file cannot be parsed.
The attributes `element', `row', `property', and `message' give
additional information.
'''
def __init__(self, message, element=None, row=None, prop=None):
self.message = message
self.element = element
self.row = row
self.prop = prop
s = ''
if self.element:
s += 'element %r: ' % self.element.name
if self.row is not None:
s += 'row %d: ' % self.row
if self.prop:
s += 'property %r: ' % self.prop.name
s += self.message
Exception.__init__(self, s)
def __repr__(self):
return ('PlyParseError(%r, element=%r, row=%r, prop=%r)' %
(self.message, self.element, self.row, self.prop))
class PlyData(object):
'''
PLY file header and data.
A PlyData instance is created in one of two ways: by the static
method PlyData.read (to read a PLY file), or directly from __init__
given a sequence of elements (which can then be written to a PLY
file).
'''
def __init__(self, elements=[], text=False, byte_order='=',
comments=[], obj_info=[]):
'''
elements: sequence of PlyElement instances.
text: whether the resulting PLY file will be text (True) or
binary (False).
byte_order: '<' for little-endian, '>' for big-endian, or '='
for native. This is only relevant if `text' is False.
comments: sequence of strings that will be placed in the header
between the 'ply' and 'format ...' lines.
obj_info: like comments, but will be placed in the header with
"obj_info ..." instead of "comment ...".
'''
if byte_order == '=' and not text:
byte_order = _native_byte_order
self.byte_order = byte_order
self.text = text
self.comments = list(comments)
self.obj_info = list(obj_info)
self.elements = elements
def _get_elements(self):
return self._elements
def _set_elements(self, elements):
self._elements = tuple(elements)
self._index()
elements = property(_get_elements, _set_elements)
def _get_byte_order(self):
return self._byte_order
def _set_byte_order(self, byte_order):
if byte_order not in ['<', '>', '=']:
raise ValueError("byte order must be '<', '>', or '='")
self._byte_order = byte_order
byte_order = property(_get_byte_order, _set_byte_order)
def _index(self):
self._element_lookup = dict((elt.name, elt) for elt in
self._elements)
if len(self._element_lookup) != len(self._elements):
raise ValueError("two elements with same name")
@staticmethod
def _parse_header(stream):
'''
Parse a PLY header from a readable file-like stream.
'''
lines = []
comments = {'comment': [], 'obj_info': []}
while True:
line = stream.readline().decode('ascii').strip()
fields = _split_line(line, 1)
if fields[0] == 'end_header':
break
elif fields[0] in comments.keys():
lines.append(fields)
else:
lines.append(line.split())
a = 0
if lines[a] != ['ply']:
raise PlyParseError("expected 'ply'")
a += 1
while lines[a][0] in comments.keys():
comments[lines[a][0]].append(lines[a][1])
a += 1
if lines[a][0] != 'format':
raise PlyParseError("expected 'format'")
if lines[a][2] != '1.0':
raise PlyParseError("expected version '1.0'")
if len(lines[a]) != 3:
raise PlyParseError("too many fields after 'format'")
fmt = lines[a][1]
if fmt not in _byte_order_map:
raise PlyParseError("don't understand format %r" % fmt)
byte_order = _byte_order_map[fmt]
text = fmt == 'ascii'
a += 1
while a < len(lines) and lines[a][0] in comments.keys():
comments[lines[a][0]].append(lines[a][1])
a += 1
return PlyData(PlyElement._parse_multi(lines[a:]),
text, byte_order,
comments['comment'], comments['obj_info'])
@staticmethod
def read(stream):
'''
Read PLY data from a readable file-like object or filename.
'''
(must_close, stream) = _open_stream(stream, 'read')
try:
data = PlyData._parse_header(stream)
for elt in data:
elt._read(stream, data.text, data.byte_order)
finally:
if must_close:
stream.close()
return data
def write(self, stream):
'''
Write PLY data to a writeable file-like object or filename.
'''
(must_close, stream) = _open_stream(stream, 'write')
try:
stream.write(self.header.encode('ascii'))
stream.write(b'\r\n')
for elt in self:
elt._write(stream, self.text, self.byte_order)
finally:
if must_close:
stream.close()
@property
def header(self):
'''
Provide PLY-formatted metadata for the instance.
'''
lines = ['ply']
if self.text:
lines.append('format ascii 1.0')
else:
lines.append('format ' +
_byte_order_reverse[self.byte_order] +
' 1.0')
# Some information is lost here, since all comments are placed
# between the 'format' line and the first element.
for c in self.comments:
lines.append('comment ' + c)
for c in self.obj_info:
lines.append('obj_info ' + c)
lines.extend(elt.header for elt in self.elements)
lines.append('end_header')
return '\r\n'.join(lines)
def __iter__(self):
return iter(self.elements)
def __len__(self):
return len(self.elements)
def __contains__(self, name):
return name in self._element_lookup
def __getitem__(self, name):
return self._element_lookup[name]
def __str__(self):
return self.header
def __repr__(self):
return ('PlyData(%r, text=%r, byte_order=%r, '
'comments=%r, obj_info=%r)' %
(self.elements, self.text, self.byte_order,
self.comments, self.obj_info))
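# Typical round-trip (sketch; 'tet.ply' and 'tet_ascii.ply' are placeholder file names):
#   plydata = PlyData.read('tet.ply')
#   vertex = plydata['vertex']            # look up an element by name
#   xs = vertex['x']                      # property data as a numpy array
#   PlyData(plydata.elements, text=True).write('tet_ascii.ply')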
def _open_stream(stream, read_or_write):
if hasattr(stream, read_or_write):
return (False, stream)
try:
return (True, open(stream, read_or_write[0] + 'b'))
except TypeError:
raise RuntimeError("expected open file or filename")
class PlyElement(object):
'''
PLY file element.
A client of this library doesn't normally need to instantiate this
directly, so the following is only for the sake of documenting the
internals.
Creating a PlyElement instance is generally done in one of two ways:
as a byproduct of PlyData.read (when reading a PLY file) and by
PlyElement.describe (before writing a PLY file).
'''
def __init__(self, name, properties, count, comments=[]):
'''
This is not part of the public interface. The preferred methods
of obtaining PlyElement instances are PlyData.read (to read from
a file) and PlyElement.describe (to construct from a numpy
array).
'''
self._name = str(name)
self._check_name()
self._count = count
self._properties = tuple(properties)
self._index()
self.comments = list(comments)
self._have_list = any(isinstance(p, PlyListProperty)
for p in self.properties)
@property
def count(self):
return self._count
def _get_data(self):
return self._data
def _set_data(self, data):
self._data = data
self._count = len(data)
self._check_sanity()
data = property(_get_data, _set_data)
def _check_sanity(self):
for prop in self.properties:
if prop.name not in self._data.dtype.fields:
raise ValueError("dangling property %r" % prop.name)
def _get_properties(self):
return self._properties
def _set_properties(self, properties):
self._properties = tuple(properties)
self._check_sanity()
self._index()
properties = property(_get_properties, _set_properties)
def _index(self):
self._property_lookup = dict((prop.name, prop)
for prop in self._properties)
if len(self._property_lookup) != len(self._properties):
raise ValueError("two properties with same name")
def ply_property(self, name):
return self._property_lookup[name]
@property
def name(self):
return self._name
def _check_name(self):
if any(c.isspace() for c in self._name):
msg = "element name %r contains spaces" % self._name
raise ValueError(msg)
def dtype(self, byte_order='='):
'''
Return the numpy dtype of the in-memory representation of the
data. (If there are no list properties, and the PLY format is
binary, then this also accurately describes the on-disk
representation of the element.)
'''
return [(prop.name, prop.dtype(byte_order))
for prop in self.properties]
@staticmethod
def _parse_multi(header_lines):
'''
Parse a list of PLY element definitions.
'''
elements = []
while header_lines:
(elt, header_lines) = PlyElement._parse_one(header_lines)
elements.append(elt)
return elements
@staticmethod
def _parse_one(lines):
'''
Consume one element definition. The unconsumed input is
returned along with a PlyElement instance.
'''
a = 0
line = lines[a]
if line[0] != 'element':
raise PlyParseError("expected 'element'")
if len(line) > 3:
raise PlyParseError("too many fields after 'element'")
if len(line) < 3:
raise PlyParseError("too few fields after 'element'")
(name, count) = (line[1], int(line[2]))
comments = []
properties = []
while True:
a += 1
if a >= len(lines):
break
if lines[a][0] == 'comment':
comments.append(lines[a][1])
elif lines[a][0] == 'property':
properties.append(PlyProperty._parse_one(lines[a]))
else:
break
return (PlyElement(name, properties, count, comments),
lines[a:])
@staticmethod
def describe(data, name, len_types={}, val_types={},
comments=[]):
'''
Construct a PlyElement from an array's metadata.
len_types and val_types can be given as mappings from list
property names to type strings (like 'u1', 'f4', etc., or
'int8', 'float32', etc.). These can be used to define the length
and value types of list properties. List property lengths
always default to type 'u1' (8-bit unsigned integer), and value
types default to 'i4' (32-bit integer).
'''
if not isinstance(data, _np.ndarray):
raise TypeError("only numpy arrays are supported")
if len(data.shape) != 1:
raise ValueError("only one-dimensional arrays are "
"supported")
count = len(data)
properties = []
descr = data.dtype.descr
for t in descr:
if not isinstance(t[1], str):
raise ValueError("nested records not supported")
if not t[0]:
raise ValueError("field with empty name")
if len(t) != 2 or t[1][1] == 'O':
# non-scalar field, which corresponds to a list
# property in PLY.
if t[1][1] == 'O':
if len(t) != 2:
raise ValueError("non-scalar object fields not "
"supported")
len_str = _data_type_reverse[len_types.get(t[0], 'u1')]
if t[1][1] == 'O':
val_type = val_types.get(t[0], 'i4')
val_str = _lookup_type(val_type)
else:
val_str = _lookup_type(t[1][1:])
prop = PlyListProperty(t[0], len_str, val_str)
else:
val_str = _lookup_type(t[1][1:])
prop = PlyProperty(t[0], val_str)
properties.append(prop)
elt = PlyElement(name, properties, count, comments)
elt.data = data
return elt
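# Sketch of the usual describe() -> write() flow (names illustrative):
#   vertex = _np.array([(0, 0, 0), (0, 1, 1)],
#                      dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
#   el = PlyElement.describe(vertex, 'vertex')
#   PlyData([el], text=True).write('vertex_only.ply')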
def _read(self, stream, text, byte_order):
'''
Read the actual data from a PLY file.
'''
if text:
self._read_txt(stream)
else:
if self._have_list:
# There are list properties, so a simple load is
# impossible.
self._read_bin(stream, byte_order)
else:
# There are no list properties, so loading the data is
# much more straightforward.
self._data = _np.fromfile(stream,
self.dtype(byte_order),
self.count)
if len(self._data) < self.count:
k = len(self._data)
del self._data
raise PlyParseError("early end-of-file", self, k)
self._check_sanity()
def _write(self, stream, text, byte_order):
'''
Write the data to a PLY file.
'''
if text:
self._write_txt(stream)
else:
if self._have_list:
# There are list properties, so serialization is
# slightly complicated.
self._write_bin(stream, byte_order)
else:
# no list properties, so serialization is
# straightforward.
self.data.astype(self.dtype(byte_order),
copy=False).tofile(stream)
def _read_txt(self, stream):
'''
Load a PLY element from an ASCII-format PLY file. The element
may contain list properties.
'''
self._data = _np.empty(self.count, dtype=self.dtype())
k = 0
for line in _islice(iter(stream.readline, b''), self.count):
fields = iter(line.strip().split())
for prop in self.properties:
try:
self._data[prop.name][k] = prop._from_fields(fields)
except StopIteration:
raise PlyParseError("early end-of-line",
self, k, prop)
except ValueError:
raise PlyParseError("malformed input",
self, k, prop)
try:
next(fields)
except StopIteration:
pass
else:
raise PlyParseError("expected end-of-line", self, k)
k += 1
if k < self.count:
del self._data
raise PlyParseError("early end-of-file", self, k)
def _write_txt(self, stream):
'''
Save a PLY element to an ASCII-format PLY file. The element may
contain list properties.
'''
for rec in self.data:
fields = []
for prop in self.properties:
fields.extend(prop._to_fields(rec[prop.name]))
_np.savetxt(stream, [fields], '%.18g', newline='\r\n')
def _read_bin(self, stream, byte_order):
'''
Load a PLY element from a binary PLY file. The element may
contain list properties.
'''
self._data = _np.empty(self.count, dtype=self.dtype(byte_order))
for k in _range(self.count):
for prop in self.properties:
try:
self._data[prop.name][k] = \
prop._read_bin(stream, byte_order)
except StopIteration:
raise PlyParseError("early end-of-file",
self, k, prop)
def _write_bin(self, stream, byte_order):
'''
Save a PLY element to a binary PLY file. The element may
contain list properties.
'''
for rec in self.data:
for prop in self.properties:
prop._write_bin(rec[prop.name], stream, byte_order)
@property
def header(self):
'''
Format this element's metadata as it would appear in a PLY
header.
'''
lines = ['element %s %d' % (self.name, self.count)]
# Some information is lost here, since all comments are placed
# between the 'element' line and the first property definition.
for c in self.comments:
lines.append('comment ' + c)
lines.extend(list(map(str, self.properties)))
return '\r\n'.join(lines)
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, value):
self.data[key] = value
def __str__(self):
return self.header
def __repr__(self):
return ('PlyElement(%r, %r, count=%d, comments=%r)' %
(self.name, self.properties, self.count,
self.comments))
class PlyProperty(object):
'''
PLY property description. This class is pure metadata; the data
itself is contained in PlyElement instances.
'''
def __init__(self, name, val_dtype):
self._name = str(name)
self._check_name()
self.val_dtype = val_dtype
def _get_val_dtype(self):
return self._val_dtype
def _set_val_dtype(self, val_dtype):
self._val_dtype = _data_types[_lookup_type(val_dtype)]
val_dtype = property(_get_val_dtype, _set_val_dtype)
@property
def name(self):
return self._name
def _check_name(self):
if any(c.isspace() for c in self._name):
msg = "Error: property name %r contains spaces" % self._name
raise RuntimeError(msg)
@staticmethod
def _parse_one(line):
assert line[0] == 'property'
if line[1] == 'list':
if len(line) > 5:
raise PlyParseError("too many fields after "
"'property list'")
if len(line) < 5:
raise PlyParseError("too few fields after "
"'property list'")
return PlyListProperty(line[4], line[2], line[3])
else:
if len(line) > 3:
raise PlyParseError("too many fields after "
"'property'")
if len(line) < 3:
raise PlyParseError("too few fields after "
"'property'")
return PlyProperty(line[2], line[1])
def dtype(self, byte_order='='):
'''
Return the numpy dtype description for this property (as a tuple
of strings).
'''
return byte_order + self.val_dtype
def _from_fields(self, fields):
'''
Parse from generator. Raise StopIteration if the property could
not be read.
'''
return _np.dtype(self.dtype()).type(next(fields))
def _to_fields(self, data):
'''
Return generator over one item.
'''
yield _np.dtype(self.dtype()).type(data)
def _read_bin(self, stream, byte_order):
'''
Read data from a binary stream. Raise StopIteration if the
property could not be read.
'''
try:
return _np.fromfile(stream, self.dtype(byte_order), 1)[0]
except IndexError:
raise StopIteration
def _write_bin(self, data, stream, byte_order):
'''
Write data to a binary stream.
'''
_np.dtype(self.dtype(byte_order)).type(data).tofile(stream)
def __str__(self):
val_str = _data_type_reverse[self.val_dtype]
return 'property %s %s' % (val_str, self.name)
def __repr__(self):
return 'PlyProperty(%r, %r)' % (self.name,
_lookup_type(self.val_dtype))
class PlyListProperty(PlyProperty):
'''
PLY list property description.
'''
def __init__(self, name, len_dtype, val_dtype):
PlyProperty.__init__(self, name, val_dtype)
self.len_dtype = len_dtype
def _get_len_dtype(self):
return self._len_dtype
def _set_len_dtype(self, len_dtype):
self._len_dtype = _data_types[_lookup_type(len_dtype)]
len_dtype = property(_get_len_dtype, _set_len_dtype)
def dtype(self, byte_order='='):
'''
List properties always have a numpy dtype of "object".
'''
return '|O'
def list_dtype(self, byte_order='='):
'''
Return the pair (len_dtype, val_dtype) (both numpy-friendly
strings).
'''
return (byte_order + self.len_dtype,
byte_order + self.val_dtype)
def _from_fields(self, fields):
(len_t, val_t) = self.list_dtype()
n = int(_np.dtype(len_t).type(next(fields)))
data = _np.loadtxt(list(_islice(fields, n)), val_t, ndmin=1)
if len(data) < n:
raise StopIteration
return data
def _to_fields(self, data):
'''
Return generator over the (numerical) PLY representation of the
list data (length followed by actual data).
'''
(len_t, val_t) = self.list_dtype()
data = _np.asarray(data, dtype=val_t).ravel()
yield _np.dtype(len_t).type(data.size)
for x in data:
yield x
def _read_bin(self, stream, byte_order):
(len_t, val_t) = self.list_dtype(byte_order)
try:
n = _np.fromfile(stream, len_t, 1)[0]
except IndexError:
raise StopIteration
data = _np.fromfile(stream, val_t, n)
if len(data) < n:
raise StopIteration
return data
def _write_bin(self, data, stream, byte_order):
'''
Write data to a binary stream.
'''
(len_t, val_t) = self.list_dtype(byte_order)
data = _np.asarray(data, dtype=val_t).ravel()
_np.array(data.size, dtype=len_t).tofile(stream)
data.tofile(stream)
def __str__(self):
len_str = _data_type_reverse[self.len_dtype]
val_str = _data_type_reverse[self.val_dtype]
return 'property list %s %s %s' % (len_str, val_str, self.name)
def __repr__(self):
return ('PlyListProperty(%r, %r, %r)' %
(self.name,
_lookup_type(self.len_dtype),
_lookup_type(self.val_dtype)))
| 26,329 | 27.744541 | 72 |
py
|
CLIP2Scene
|
CLIP2Scene-main/utils/convert_clip_weights.py
|
import torch
import clip
import argparse
def parse_args():
parser = argparse.ArgumentParser(description='Extract and save the CLIP visual weights')
parser.add_argument('--model', default='RN50', choices=['RN50', 'RN101', 'RN50x4', 'RN50x16', 'RN50x64', 'ViT32', 'ViT16', 'ViT14'], help='clip model name')
parser.add_argument('--backbone', action='store_true', help='Prepend the word backbone to the key so that it can be directly loaded as a checkpoint')
args = parser.parse_args()
return args
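# Example invocations (sketch; the output file name is derived from --model):
#   python convert_clip_weights.py --model RN50             -> RN50_clip_weights.pth
#   python convert_clip_weights.py --model ViT16 --backbone -> ViT16_clip_backbone.pth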
if __name__ == '__main__':
args = parse_args()
name_mapping = {'RN50': 'RN50', 'RN101': 'RN101', 'RN50x4': 'RN50x4', \
'RN50x16': 'RN50x16', 'RN50x64': 'RN50x64', \
'ViT32': 'ViT-B/32', 'ViT16': 'ViT-B/16', 'ViT14': 'ViT-L/14'}
clip_model, preprocess = clip.load(name_mapping[args.model], device='cpu')
state_dict = clip_model.state_dict()
result_model = {'meta': {}, 'state_dict': {}}
all_model = dict()
stem_mapping = {'conv1': 0, 'bn1': 1, 'conv2': 3, 'bn2': 4, 'conv3': 6, 'bn3':7}
clip_keys = []
prefix = 'visual'
for key in state_dict.keys():
if 'ViT' in args.model and prefix in key:
new_key = key[len(f'{prefix}.'):]
if new_key == 'proj':
all_model['proj'] = {}
all_model['proj']['weight'] = state_dict[key].float().t()
continue
if new_key == 'class_embedding':
new_key = 'cls_token'
state_dict[key] = state_dict[key][None, None, :]
elif new_key == 'positional_embedding':
new_key = 'pos_embed'
state_dict[key] = state_dict[key][None, :, :]
elif new_key == 'conv1.weight':
new_key = 'patch_embed.projection.weight'
elif 'ln_pre' in new_key:
weight_or_bias = new_key.split('.')[-1]
new_key = f'ln0.{weight_or_bias}'
elif 'ln_post' in new_key:
weight_or_bias = new_key.split('.')[-1]
new_key = f'ln1.{weight_or_bias}'
elif 'transformer' in new_key:
new_key = 'layers.' + new_key[len('transformer.resblocks.'):]
if 'mlp' in new_key:
new_key = new_key.replace('mlp', 'ffn.layers')
if 'c_fc' in new_key:
new_key = new_key.replace('c_fc', '0.0')
if 'c_proj' in new_key:
new_key = new_key.replace('c_proj', '1')
if 'attn' in new_key:
new_key = new_key.replace('attn', 'attn.attn')
elif 'ln_' in new_key:
new_key = new_key.replace('ln_', 'ln')
if args.backbone:
new_key = 'backbone.' + new_key
clip_keys.append(new_key)
result_model['state_dict'].update({new_key: state_dict[key].float()})
elif prefix in key:
if 'attnpool' in key:
if 'proj' in key:
proj_name = key.split('.')[2]
weight_or_bias = key.split('.')[3]
if proj_name not in all_model:
all_model[proj_name] = {}
all_model[proj_name][weight_or_bias] = state_dict[key].float()
else:
new_key = key[len(f'{prefix}.'):]
if 'layer' not in new_key:
layer_name, layer_type = new_key.split('.')
new_key = 'stem.{}.{}'.format(stem_mapping[layer_name], layer_type)
if 'downsample' in new_key:
splits = new_key.split('.')
new_key = '{}.{}.{}.{}.{}'.format(splits[0], splits[1], splits[2], \
int(splits[3])+1, splits[4])
if args.backbone:
new_key = 'backbone.' + new_key
clip_keys.append(new_key)
result_model['state_dict'].update({new_key: state_dict[key].float()})
if args.backbone:
torch.save(result_model, f'{args.model}_clip_backbone.pth')
else:
all_model['clip'] = result_model['state_dict']
torch.save(all_model, '{}_clip_weights.pth'.format(args.model))
| 4,232 | 46.033333 | 160 |
py
|
CLIP2Scene
|
CLIP2Scene-main/utils/transforms.py
|
import torch
import random
import numpy as np
from torchvision.transforms import InterpolationMode
from torchvision.transforms import RandomResizedCrop
from torchvision.transforms.functional import resize, resized_crop, hflip
import math
class ComposeClouds:
"""
Compose multiple transformations on a point cloud.
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, pc):
for transform in self.transforms:
pc = transform(pc)
return pc
class Rotation_z:
"""
Random rotation of a point cloud around the z axis.
"""
def __init__(self):
pass
def __call__(self, pc):
angle = np.random.random() * 2 * np.pi
c = np.cos(angle)
s = np.sin(angle)
R = torch.tensor(
[[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]], dtype=torch.float32
)
pc = pc @ R.T
return pc
class FlipAxis:
"""
Flip a point cloud in the x and/or y axis, with probability p for each.
"""
def __init__(self, p=0.5):
self.p = p
def __call__(self, pc):
for curr_ax in range(2):
if random.random() < self.p:
pc[:, curr_ax] = -pc[:, curr_ax]
return pc
class random_rotation_scalling_flipping:
def __init__(self, p=0.5):
self.p = p
def __call__(self, coords):
scale_flip = np.eye(3) + np.random.randn(3, 3) * 0.1
scale_flip[0][0] *= np.random.randint(0, 2) * 2 - 1
scale_flip = torch.from_numpy(scale_flip).float()
# scale = torch.eye(3)
theta = random.uniform(0, 2) * math.pi
rotationx = torch.tensor([[math.cos(theta), math.sin(theta), 0],
[-math.sin(theta), math.cos(theta), 0],
[0, 0, 1]]).float()
m = torch.matmul(scale_flip, rotationx)
coords = torch.matmul(coords.float(), m)
return coords
def make_transforms_clouds(config):
"""
Read the config file and return the desired transformation on point clouds.
"""
transforms = []
if config["transforms_clouds"] is not None:
for t in config["transforms_clouds"]:
if config['dataset'] == 'scannet' and config['mode'] == 'finetune':
transforms.append(random_rotation_scalling_flipping())
# print("sssss")
else:
if t.lower() == "rotation":
transforms.append(Rotation_z())
elif t.lower() == "flipaxis":
transforms.append(FlipAxis())
else:
raise Exception(f"Unknown transformation: {t}")
if not len(transforms):
return None
return ComposeClouds(transforms)
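# Minimal config sketch (key names follow the lookups above, values illustrative):
#   config = {"transforms_clouds": ["Rotation", "FlipAxis"],
#             "dataset": "nuscenes", "mode": "pretrain"}
#   cloud_transforms = make_transforms_clouds(config)  # -> ComposeClouds instance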
class ComposeAsymmetrical:
"""
Compose multiple transformations on a point cloud, and image and the
intricate pairings between both (only available for the heavy dataset).
Note: Those transformations have the ability to increase the number of
images, and drastically modify the pairings
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, pc, features, img, pairing_points, pairing_images, superpixels=None):
for transform in self.transforms:
pc, features, img, pairing_points, pairing_images, superpixels = transform(
pc, features, img, pairing_points, pairing_images, superpixels
)
if superpixels is None:
return pc, features, img, pairing_points, pairing_images
return pc, features, img, pairing_points, pairing_images, superpixels
class ResizedCrop:
"""
Resize and crop an image, and adapt the pairings accordingly.
"""
def __init__(
self,
image_crop_size=(224, 416),
image_crop_range=[0.3, 1.0],
image_crop_ratio=(14.0 / 9.0, 17.0 / 9.0),
crop_center=False,
):
self.crop_size = image_crop_size
self.crop_range = image_crop_range
self.crop_ratio = image_crop_ratio
# self.img_interpolation = image_interpolation
self.crop_center = crop_center
def __call__(self, pc, features, images, pairing_points, pairing_images, superpixels=None):
imgs = torch.empty(
(images.shape[0], 3) + tuple(self.crop_size), dtype=torch.float32
)
if superpixels is not None:
superpixels = superpixels.unsqueeze(1)
sps = torch.empty(
(images.shape[0],) + tuple(self.crop_size), dtype=torch.uint8
)
pairing_points_out = np.empty(0, dtype=np.int64)
pairing_images_out = np.empty((0, 3), dtype=np.int64)
if self.crop_center:
pairing_points_out = pairing_points
_, _, h, w = images.shape
for id, img in enumerate(images):
mask = pairing_images[:, 0] == id
p2 = pairing_images[mask]
p2 = np.round(
np.multiply(p2, [1.0, self.crop_size[0] / h, self.crop_size[1] / w])
).astype(np.int64)
imgs[id] = resize(img, self.crop_size)
if superpixels is not None:
sps[id] = resize(
superpixels[id], self.crop_size, InterpolationMode.NEAREST
)
p2[:, 1] = np.clip(p2[:, 1], 0, self.crop_size[0] - 1)
p2[:, 2] = np.clip(p2[:, 2], 0, self.crop_size[1] - 1)
pairing_images_out = np.concatenate((pairing_images_out, p2))
else:
for id, img in enumerate(images):
successfull = False
mask = pairing_images[:, 0] == id
P1 = pairing_points[mask]
P2 = pairing_images[mask]
while not successfull:
i, j, h, w = RandomResizedCrop.get_params(
img, self.crop_range, self.crop_ratio
)
p1 = P1.copy()
p2 = P2.copy()
p2 = np.round(
np.multiply(
p2 - [0, i, j],
[1.0, self.crop_size[0] / h, self.crop_size[1] / w],
)
).astype(np.int64)
valid_indexes_0 = np.logical_and(
p2[:, 1] < self.crop_size[0], p2[:, 1] >= 0
)
valid_indexes_1 = np.logical_and(
p2[:, 2] < self.crop_size[1], p2[:, 2] >= 0
)
valid_indexes = np.logical_and(valid_indexes_0, valid_indexes_1)
sum_indexes = valid_indexes.sum()
len_indexes = len(valid_indexes)
if sum_indexes > 1024 or sum_indexes / len_indexes > 0.75:
successfull = True
imgs[id] = resized_crop(
img, i, j, h, w, self.crop_size
)
if superpixels is not None:
sps[id] = resized_crop(
superpixels[id],
i,
j,
h,
w,
self.crop_size,
)
pairing_points_out = np.concatenate(
(pairing_points_out, p1[valid_indexes])
)
pairing_images_out = np.concatenate(
(pairing_images_out, p2[valid_indexes])
)
if superpixels is None:
return pc, features, imgs, pairing_points_out, pairing_images_out, superpixels
return pc, features, imgs, pairing_points_out, pairing_images_out, sps
class FlipHorizontal:
"""
Flip horizontaly the image with probability p and adapt the matching accordingly.
"""
def __init__(self, p=0.5):
self.p = p
def __call__(self, pc, features, images, pairing_points, pairing_images, superpixels=None):
w = images.shape[3]
for i, img in enumerate(images):
if random.random() < self.p:
images[i] = hflip(img)
mask = pairing_images[:, 0] == i
pairing_images[mask, 2] = w - 1 - pairing_images[mask, 2]
return pc, features, images, pairing_points, pairing_images, superpixels
class DropCuboids:
"""
Drop random cuboids in a cloud
"""
def __call__(self, pc, features, images, pairing_points, pairing_images, superpixels=None):
range_xyz = torch.max(pc, axis=0)[0] - torch.min(pc, axis=0)[0]
crop_range = np.random.random() * 0.2
new_range = range_xyz * crop_range / 2.0
sample_center = pc[np.random.choice(len(pc))]
max_xyz = sample_center + new_range
min_xyz = sample_center - new_range
upper_idx = torch.sum((pc[:, 0:3] < max_xyz).to(torch.int32), 1) == 3
lower_idx = torch.sum((pc[:, 0:3] > min_xyz).to(torch.int32), 1) == 3
new_pointidx = ~((upper_idx) & (lower_idx))
pc_out = pc[new_pointidx]
features_out = features[new_pointidx]
mask = new_pointidx[pairing_points]
cs = torch.cumsum(new_pointidx, 0) - 1
pairing_points_out = pairing_points[mask]
pairing_points_out = cs[pairing_points_out]
pairing_images_out = pairing_images[mask]
successfull = True
for id in range(len(images)):
if np.sum(pairing_images_out[:, 0] == id) < 1024:
successfull = False
if successfull:
return (
pc_out,
features_out,
images,
np.array(pairing_points_out),
np.array(pairing_images_out),
superpixels,
)
return pc, features, images, pairing_points, pairing_images, superpixels
def make_transforms_asymmetrical(config):
"""
Read the config file and return the desired mixed transformation.
"""
transforms = []
if config["transforms_mixed"] is not None:
for t in config["transforms_mixed"]:
if t.lower() == "resizedcrop":
# pass
transforms.append(
ResizedCrop(
image_crop_size=config["crop_size"],
image_crop_ratio=config["crop_ratio"],
)
)
elif t.lower() == "fliphorizontal":
transforms.append(FlipHorizontal())
elif t.lower() == "dropcuboids":
transforms.append(DropCuboids())
else:
raise Exception(f"Unknown transformation {t}")
if not len(transforms):
return None
return ComposeAsymmetrical(transforms)
def make_transforms_asymmetrical_val(config):
"""
Read the config file and return the desired mixed transformation
for the validation only.
"""
transforms = []
if config["transforms_mixed"] is not None:
for t in config["transforms_mixed"]:
if t.lower() == "resizedcrop":
# pass
transforms.append(
ResizedCrop(image_crop_size=config["crop_size"], crop_center=True)
)
if not len(transforms):
return None
return ComposeAsymmetrical(transforms)
| 11,427 | 33.841463 | 95 |
py
|
CLIP2Scene
|
CLIP2Scene-main/model/clip_model.py
|
import torch.nn as nn
import torch.nn.functional as F
import clip
class ClipFeatureExtractor(nn.Module):
"""
CLIP image encoder used as a frozen feature extractor.
"""
def __init__(self, config, preprocessing=None):
super(ClipFeatureExtractor, self).__init__()
self.encoder, preprocess = clip.load("ViT-B/32", device="cuda")
for param in self.encoder.parameters():
param.requires_grad = False
# self.decoder = nn.Sequential(
# nn.Conv2d(embed_dim, config["model_n_out"], 1),
# nn.Upsample(scale_factor=patch_size, mode="bilinear", align_corners=True),
# )
self.preprocessing = preprocess
self.normalize_feature = config["normalize_features"]
def forward(self, x):
if self.preprocessing:
x = self.preprocessing(x)
# encode_image returns one global CLIP embedding per image
x = self.encoder.encode_image(x)
if self.normalize_feature:
x = F.normalize(x, p=2, dim=1)
return x
| 1,158 | 28.717949 | 92 |
py
|
CLIP2Scene
|
CLIP2Scene-main/model/image_model.py
|
import os
import torch
import requests
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as T
import torch.utils.model_zoo as model_zoo
from torchvision.models.resnet import model_urls
from model.modules.resnet_encoder import resnet_encoders
import model.modules.dino.vision_transformer as dino_vit
import clip
_MEAN_PIXEL_IMAGENET = [0.485, 0.456, 0.406]
_STD_PIXEL_IMAGENET = [0.229, 0.224, 0.225]
def adapt_weights(architecture):
if architecture == "imagenet" or architecture is None:
return
weights_url = {
"moco_v2": "https://dl.fbaipublicfiles.com/moco/moco_checkpoints/moco_v2_800ep/moco_v2_800ep_pretrain.pth.tar",
"moco_v1": "https://dl.fbaipublicfiles.com/moco/moco_checkpoints/moco_v1_200ep/moco_v1_200ep_pretrain.pth.tar",
"swav": "https://dl.fbaipublicfiles.com/deepcluster/swav_800ep_pretrain.pth.tar",
"deepcluster_v2": "https://dl.fbaipublicfiles.com/deepcluster/deepclusterv2_800ep_pretrain.pth.tar",
"dino": "https://dl.fbaipublicfiles.com/dino/dino_resnet50_pretrain/dino_resnet50_pretrain.pth"
}
if not os.path.exists(f"weights/{architecture}.pt"):
r = requests.get(weights_url[architecture], allow_redirects=True)
os.makedirs("weights", exist_ok=True)
with open(f"weights/{architecture}.pt", 'wb') as f:
f.write(r.content)
weights = torch.load(f"weights/{architecture}.pt")
if architecture == "obow":
return weights["network"]
if architecture == "pixpro":
weights = {
k.replace("module.encoder.", ""): v
for k, v in weights["model"].items()
if k.startswith("module.encoder.")
}
return weights
if architecture in ("moco_v1", "moco_v2", "moco_coco"):
weights = {
k.replace("module.encoder_q.", ""): v
for k, v in weights["state_dict"].items()
if k.startswith("module.encoder_q.") and not k.startswith("module.encoder_q.fc")
}
return weights
if architecture in ("swav", "deepcluster_v2"):
weights = {
k.replace("module.", ""): v
for k, v in weights.items()
if k.startswith("module.") and not k.startswith("module.pro")
}
return weights
if architecture == "dino":
return weights
class Preprocessing:
"""
Use the ImageNet preprocessing.
"""
def __init__(self):
normalize = T.Normalize(mean=_MEAN_PIXEL_IMAGENET, std=_STD_PIXEL_IMAGENET)
self.preprocessing_img = normalize
def __call__(self, image):
return self.preprocessing_img(image)
class DilationFeatureExtractor(nn.Module):
"""
Dilated ResNet Feature Extractor
"""
def __init__(self, config, preprocessing=None):
super(DilationFeatureExtractor, self).__init__()
assert (
config["images_encoder"] == "resnet50"
), "DilationFeatureExtractor is only available for resnet50"
Encoder = resnet_encoders["resnet50"]["encoder"]
params = resnet_encoders["resnet50"]["params"]
params.update(replace_stride_with_dilation=[True, True, True])
self.encoder = Encoder(**params)
if config["image_weights"] == "imagenet":
self.encoder.load_state_dict(model_zoo.load_url(model_urls["resnet50"]))
weights = adapt_weights(architecture=config["image_weights"])
if weights is not None:
self.encoder.load_state_dict(weights)
for param in self.encoder.parameters():
param.requires_grad = False
in1 = 2048
self.decoder = nn.Sequential(
nn.Conv2d(in1, config["model_n_out"], 1),
nn.Upsample(scale_factor=4, mode="bilinear", align_corners=True),
)
self.preprocessing = preprocessing
self.normalize_feature = config["normalize_features"]
self.channel_avgpooling = nn.AvgPool2d((32, 1), stride=(32, 1))
self.upsample4 = nn.Upsample(scale_factor=4, mode="bilinear", align_corners=True)
def forward(self, x):
if self.preprocessing:
x = self.preprocessing(x)
x = self.encoder(x)
# x = self.channel_avgpooling(x.permute(0, 2, 1, 3))
# x = self.upsample4(x.permute(0, 2, 1, 3))
x = self.decoder(x)
if self.normalize_feature:
x = F.normalize(x, p=2, dim=1)
return x
class PPKTFeatureExtractor(nn.Module):
"""
PPKT baseline
"""
def __init__(self, config, preprocessing=None):
super(PPKTFeatureExtractor, self).__init__()
Encoder = resnet_encoders[config["images_encoder"]]["encoder"]
params = resnet_encoders[config["images_encoder"]]["params"]
self.encoder = Encoder(**params)
if config["image_weights"] == "imagenet":
self.encoder.load_state_dict(model_zoo.load_url(model_urls[config["images_encoder"]]))
if config["image_weights"] not in (None, "imagenet"):
assert (
config["images_encoder"] == "resnet50"
), "{} weights are only available for resnet50".format(
config["images_weights"]
)
weights = adapt_weights(architecture=config["image_weights"])
if weights is not None:
self.encoder.load_state_dict(weights)
for param in self.encoder.parameters():
param.requires_grad = False
if config["images_encoder"] == "resnet18":
in1 = 512
elif config["images_encoder"] == "resnet50":
in1 = 2048
self.decoder = nn.Sequential(
nn.Conv2d(in1, config["model_n_out"], 1),
nn.Upsample(scale_factor=32, mode="bilinear", align_corners=True),
)
self.preprocessing = preprocessing
self.normalize_feature = config["normalize_features"]
def forward(self, x):
if self.preprocessing:
x = self.preprocessing(x)
x = self.decoder(self.encoder(x))
if self.normalize_feature:
x = F.normalize(x, p=2, dim=1)
return x
class DinoVitFeatureExtractor(nn.Module):
"""
DINO Vision Transformer Feature Extractor.
"""
def __init__(self, config, preprocessing=None):
super(DinoVitFeatureExtractor, self).__init__()
dino_models = {
"vit_small_p16": ("vit_small", 16, 384),
"vit_small_p8": ("vit_small", 8, 384),
"vit_base_p16": ("vit_base", 16, 768),
"vit_base_p8": ("vit_base", 8, 768),
}
assert (
config["images_encoder"] in dino_models.keys()
), f"DilationFeatureExtractor is only available for {dino_models.keys()}"
model_name, patch_size, embed_dim = dino_models[config["images_encoder"]]
print("Use Vision Transformer pretrained with DINO as the image encoder")
print(f"==> model_name: {model_name}")
print(f"==> patch_size: {patch_size}")
print(f"==> embed_dim: {embed_dim}")
self.patch_size = patch_size
self.embed_dim = embed_dim
self.encoder = dino_vit.__dict__[model_name](patch_size=patch_size, num_classes=0)
dino_vit.load_pretrained_weights(self.encoder, "", None, model_name, patch_size)
for param in self.encoder.parameters():
param.requires_grad = False
self.decoder = nn.Sequential(
nn.Conv2d(embed_dim, config["model_n_out"], 1),
nn.Upsample(scale_factor=patch_size, mode="bilinear", align_corners=True),
)
self.preprocessing = preprocessing
self.normalize_feature = config["normalize_features"]
def forward(self, x):
if self.preprocessing:
x = self.preprocessing(x)
batch_size, _, height, width = x.size()
assert (height % self.patch_size) == 0
assert (width % self.patch_size) == 0
f_height = height // self.patch_size
f_width = width // self.patch_size
x = self.encoder(x, all=True)
# the output of x should be [batch_size x (1 + f_height * f_width) x self.embed_dim]
assert x.size(1) == (1 + f_height * f_width)
# Remove the CLS token and reshape the the patch token features.
x = x[:, 1:, :].contiguous().transpose(1, 2).contiguous().view(batch_size, self.embed_dim, f_height, f_width)
x = self.decoder(x)
if self.normalize_feature:
x = F.normalize(x, p=2, dim=1)
return x
| 8,571 | 34.27572 | 119 |
py
|
CLIP2Scene
|
CLIP2Scene-main/model/fusionNet.py
|
import os
import torch
import requests
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as T
import torch.utils.model_zoo as model_zoo
from torchvision.models.resnet import model_urls
from model.modules.resnet_encoder import resnet_encoders
import model.modules.dino.vision_transformer as dino_vit
class fusionNet(nn.Module):
"""
Attention-based fusion of per-pixel and per-point features, using class text embeddings as queries.
"""
def __init__(self, config):
super().__init__()
self.config = config
self.text_embeddings_path = self.config['text_embeddings_path']
text_categories = self.config['text_categories']
if self.text_embeddings_path is None:
self.text_embeddings = nn.Parameter(torch.zeros(text_categories, 512))
nn.init.normal_(self.text_embeddings, mean=0.0, std=0.01)
else:
self.register_buffer('text_embeddings', torch.randn(text_categories, 512))
loaded = torch.load(self.text_embeddings_path, map_location='cuda')
self.text_embeddings[:, :] = loaded[:, :]
self.img_size = (224, 416)
self.t = 1
def forward(self, feature_packages):
# feature_packages size: voxelSize * 8 * 1537
# pixel_feature, point_feature, text_embedding, pred = feature_packages[:, :, :512], feature_packages[:, :, 512:1024], feature_packages[:, :, 1024:1536], feature_packages[:, :, -1]
pixel_feature, point_feature, pred = feature_packages[:, :, :512], feature_packages[:, :, 512:1024], feature_packages[:, :, -1]
pixel_pred = pred[:, 0].long()
text_embedding = self.text_embeddings[pixel_pred].unsqueeze(1)
pixel_point_feature = point_feature
pixel_point_attention = torch.sum(pixel_point_feature * text_embedding, dim=2)
index_point_sum = torch.sum(pixel_point_attention, dim=1) != 0
pixel_point_attention = pixel_point_attention[index_point_sum] / self.t
pixel_point_feature = pixel_point_feature[index_point_sum]
pixel_pred = pixel_pred[index_point_sum]
attention_union_sparse = pixel_point_attention.to_sparse()
attention_union_dense = torch.sparse.softmax(attention_union_sparse, dim=1).to_dense()
fusion_feature = torch.sum(attention_union_dense.unsqueeze(-1) * pixel_point_feature, dim=1)
inner_products = torch.sigmoid(torch.sum(fusion_feature.unsqueeze(1) * pixel_point_feature, dim=2))
return fusion_feature, inner_products, pixel_pred
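# Shape sketch (illustrative, following the slicing above): feature_packages is
# (num_voxels, K, C) with pixel features in channels [0, 512), point features in
# [512, 1024) and the predicted class id in the last channel; the call returns
# per-voxel fused 512-d features, per-point affinities in (0, 1) from the sigmoid,
# and the retained pixel predictions.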
| 2,487 | 42.649123 | 188 |
py
|
CLIP2Scene
|
CLIP2Scene-main/model/resnet.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# https://arxiv.org/abs/2007.10985
import torch.nn as nn
import MinkowskiEngine as ME
from MinkowskiEngine import MinkowskiNetwork
from model.modules.common import ConvType, NormType, get_norm, conv, sum_pool
class Model(MinkowskiNetwork):
OUT_PIXEL_DIST = -1
def __init__(self, in_channels, out_channels, config, D, **kwargs):
super(Model, self).__init__(D)
self.in_channels = in_channels
self.out_channels = out_channels
self.config = config
class ResNetBase(Model):
BLOCK = None
LAYERS = ()
INIT_DIM = 64
PLANES = (64, 128, 256, 512)
OUT_PIXEL_DIST = 32
HAS_LAST_BLOCK = False
CONV_TYPE = ConvType.HYPERCUBE
def __init__(self, in_channels, out_channels, config, D=3, **kwargs):
assert self.BLOCK is not None
assert self.OUT_PIXEL_DIST > 0
super(ResNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs)
self.network_initialization(in_channels, out_channels, config, D)
self.weight_initialization()
def network_initialization(self, in_channels, out_channels, config, D):
def space_n_time_m(n, m):
return n if D == 3 else [n, n, n, m]
if D == 4:
self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)
dilations = config.dilations
bn_momentum = config.opt.bn_momentum
self.inplanes = self.INIT_DIM
self.conv1 = conv(
in_channels,
self.inplanes,
kernel_size=space_n_time_m(config.conv1_kernel_size, 1),
stride=1,
D=D,
)
self.bn1 = get_norm(
NormType.BATCH_NORM, self.inplanes, D=self.D, bn_momentum=bn_momentum
)
self.relu = ME.MinkowskiReLU(inplace=True)
self.pool = sum_pool(
kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), D=D
)
self.layer1 = self._make_layer(
self.BLOCK,
self.PLANES[0],
self.LAYERS[0],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[0], 1),
)
self.layer2 = self._make_layer(
self.BLOCK,
self.PLANES[1],
self.LAYERS[1],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[1], 1),
)
self.layer3 = self._make_layer(
self.BLOCK,
self.PLANES[2],
self.LAYERS[2],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[2], 1),
)
self.layer4 = self._make_layer(
self.BLOCK,
self.PLANES[3],
self.LAYERS[3],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[3], 1),
)
self.final = conv(
self.PLANES[3] * self.BLOCK.expansion,
out_channels,
kernel_size=1,
bias=True,
D=D,
)
def weight_initialization(self):
for m in self.modules():
if isinstance(m, ME.MinkowskiBatchNorm):
nn.init.constant_(m.bn.weight, 1)
nn.init.constant_(m.bn.bias, 0)
def _make_layer(
self,
block,
planes,
blocks,
stride=1,
dilation=1,
norm_type=NormType.BATCH_NORM,
bn_momentum=0.1,
):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False,
D=self.D,
),
get_norm(
norm_type,
planes * block.expansion,
D=self.D,
bn_momentum=bn_momentum,
),
)
layers = []
layers.append(
block(
self.inplanes,
planes,
stride=stride,
dilation=dilation,
downsample=downsample,
conv_type=self.CONV_TYPE,
D=self.D,
)
)
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
stride=1,
dilation=dilation,
conv_type=self.CONV_TYPE,
D=self.D,
)
)
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.pool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.final(x)
return x
| 5,183 | 28.123596 | 88 |
py
|
CLIP2Scene
|
CLIP2Scene-main/model/spconv_backbone.py
|
from functools import partial
import numpy as np
import spconv
import torch.nn as nn
def post_act_block(
in_channels,
out_channels,
kernel_size,
indice_key=None,
stride=1,
padding=0,
conv_type="subm",
norm_fn=None,
):
if conv_type == "subm":
conv = spconv.SubMConv3d(
in_channels, out_channels, kernel_size, bias=False, indice_key=indice_key
)
elif conv_type == "spconv":
conv = spconv.SparseConv3d(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
bias=False,
indice_key=indice_key,
)
elif conv_type == "inverseconv":
conv = spconv.SparseInverseConv3d(
in_channels, out_channels, kernel_size, indice_key=indice_key, bias=False
)
elif conv_type == "transposeconv":
conv = spconv.SparseConvTranspose3d(
in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=False, indice_key=indice_key
)
else:
raise NotImplementedError
m = spconv.SparseSequential(
conv,
norm_fn(out_channels),
nn.ReLU(),
)
return m
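# Usage sketch, mirroring the backbones below: a norm constructor is required and
# conv_type picks submanifold ("subm"), strided ("spconv"), inverse or transpose conv.
#   norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
#   subm = post_act_block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key="subm1")
#   down = post_act_block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1,
#                         indice_key="spconv2", conv_type="spconv")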
class SparseBasicBlock(spconv.SparseModule):
expansion = 1
def __init__(
self, inplanes, planes, stride=1, norm_fn=None, downsample=None, indice_key=None
):
super(SparseBasicBlock, self).__init__()
assert norm_fn is not None
bias = norm_fn is not None
self.conv1 = spconv.SubMConv3d(
inplanes,
planes,
kernel_size=3,
stride=stride,
padding=1,
bias=bias,
indice_key=indice_key,
)
self.bn1 = norm_fn(planes)
self.relu = nn.ReLU()
self.conv2 = spconv.SubMConv3d(
planes,
planes,
kernel_size=3,
stride=stride,
padding=1,
bias=bias,
indice_key=indice_key,
)
self.bn2 = norm_fn(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out.features = self.bn1(out.features)
out.features = self.relu(out.features)
out = self.conv2(out)
out.features = self.bn2(out.features)
if self.downsample is not None:
identity = self.downsample(x)
out.features += identity.features
out.features = self.relu(out.features)
return out
class VoxelBackBone8x(nn.Module):
def __init__(self, input_channels, grid_size, **kwargs):
super().__init__()
norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
self.sparse_shape = grid_size[::-1] + [1, 0, 0]
self.conv_input = spconv.SparseSequential(
spconv.SubMConv3d(
input_channels, 16, 3, padding=1, bias=False, indice_key="subm1"
),
norm_fn(16),
nn.ReLU(),
)
block = post_act_block
self.conv1 = spconv.SparseSequential(
block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key="subm1"),
)
self.conv2 = spconv.SparseSequential(
# [1600, 1408, 41] <- [800, 704, 21]
block(
16,
32,
3,
norm_fn=norm_fn,
stride=2,
padding=1,
indice_key="spconv2",
conv_type="spconv",
),
block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key="subm2"),
block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key="subm2"),
)
self.conv3 = spconv.SparseSequential(
# [800, 704, 21] <- [400, 352, 11]
block(
32,
64,
3,
norm_fn=norm_fn,
stride=2,
padding=1,
indice_key="spconv3",
conv_type="spconv",
),
block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key="subm3"),
block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key="subm3"),
)
self.conv4 = spconv.SparseSequential(
# [400, 352, 11] <- [200, 176, 5]
block(
64,
64,
3,
norm_fn=norm_fn,
stride=2,
padding=(0, 1, 1),
indice_key="spconv4",
conv_type="spconv",
),
block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key="subm4"),
block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key="subm4"),
)
last_pad = 0
self.conv_out = spconv.SparseSequential(
# [200, 150, 5] -> [200, 150, 2]
spconv.SparseConv3d(
64,
128,
(3, 1, 1),
stride=(2, 1, 1),
padding=last_pad,
bias=False,
indice_key="spconv_down2",
),
norm_fn(128),
nn.ReLU(),
)
self.num_point_features = 128
self.backbone_channels = {
"x_conv1": 16,
"x_conv2": 32,
"x_conv3": 64,
"x_conv4": 64,
}
def forward(self, input_sp_tensor):
"""
Args:
batch_dict:
batch_size: int
vfe_features: (num_voxels, C)
voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
Returns:
batch_dict:
encoded_spconv_tensor: sparse tensor
"""
x = self.conv_input(input_sp_tensor)
x_conv1 = self.conv1(x)
x_conv2 = self.conv2(x_conv1)
x_conv3 = self.conv3(x_conv2)
x_conv4 = self.conv4(x_conv3)
out = self.conv_out(x_conv4)
return out
class VoxelResBackBone8x(nn.Module):
def __init__(self, input_channels, grid_size, **kwargs):
super().__init__()
norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
self.sparse_shape = grid_size[::-1] + [1, 0, 0]
self.conv_input = spconv.SparseSequential(
spconv.SubMConv3d(
input_channels, 16, 3, padding=1, bias=False, indice_key="subm1"
),
norm_fn(16),
nn.ReLU(),
)
block = post_act_block
self.conv1 = spconv.SparseSequential(
SparseBasicBlock(16, 16, norm_fn=norm_fn, indice_key="res1"),
SparseBasicBlock(16, 16, norm_fn=norm_fn, indice_key="res1"),
)
self.conv2 = spconv.SparseSequential(
# [1600, 1408, 41] <- [800, 704, 21]
block(
16,
32,
3,
norm_fn=norm_fn,
stride=2,
padding=1,
indice_key="spconv2",
conv_type="spconv",
),
SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key="res2"),
SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key="res2"),
)
self.conv3 = spconv.SparseSequential(
# [800, 704, 21] <- [400, 352, 11]
block(
32,
64,
3,
norm_fn=norm_fn,
stride=2,
padding=1,
indice_key="spconv3",
conv_type="spconv",
),
SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key="res3"),
SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key="res3"),
)
self.conv4 = spconv.SparseSequential(
# [400, 352, 11] <- [200, 176, 5]
block(
64,
128,
3,
norm_fn=norm_fn,
stride=2,
padding=(0, 1, 1),
indice_key="spconv4",
conv_type="spconv",
),
SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key="res4"),
SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key="res4"),
)
last_pad = 0
self.conv_out = spconv.SparseSequential(
# [200, 150, 5] -> [200, 150, 2]
spconv.SparseConv3d(
128,
128,
(3, 1, 1),
stride=(2, 1, 1),
padding=last_pad,
bias=False,
indice_key="spconv_down2",
),
norm_fn(128),
nn.ReLU(),
)
self.num_point_features = 128
self.backbone_channels = {
"x_conv1": 16,
"x_conv2": 32,
"x_conv3": 64,
"x_conv4": 128,
}
def forward(self, batch_dict):
"""
Args:
batch_dict:
batch_size: int
vfe_features: (num_voxels, C)
voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
Returns:
batch_dict:
encoded_spconv_tensor: sparse tensor
"""
voxel_features, voxel_coords = (
batch_dict["voxel_features"],
batch_dict["voxel_coords"],
)
batch_size = batch_dict["batch_size"]
input_sp_tensor = spconv.SparseConvTensor(
features=voxel_features,
indices=voxel_coords.int(),
spatial_shape=self.sparse_shape,
batch_size=batch_size,
)
x = self.conv_input(input_sp_tensor)
x_conv1 = self.conv1(x)
x_conv2 = self.conv2(x_conv1)
x_conv3 = self.conv3(x_conv2)
x_conv4 = self.conv4(x_conv3)
# for detection head
# [200, 176, 5] -> [200, 176, 2]
out = self.conv_out(x_conv4)
batch_dict.update(
{"encoded_spconv_tensor": out, "encoded_spconv_tensor_stride": 8}
)
batch_dict.update(
{
"multi_scale_3d_features": {
"x_conv1": x_conv1,
"x_conv2": x_conv2,
"x_conv3": x_conv3,
"x_conv4": x_conv4,
}
}
)
return batch_dict
class HeightCompression(nn.Module):
def __init__(self, **kwargs):
super().__init__()
def forward(self, encoded_spconv_tensor):
"""
Args:
batch_dict:
encoded_spconv_tensor: sparse tensor
Returns:
batch_dict:
spatial_features:
"""
# encoded_spconv_tensor = batch_dict['encoded_spconv_tensor']
spatial_features = encoded_spconv_tensor.dense()
N, C, D, H, W = spatial_features.shape
spatial_features = spatial_features.view(N, C * D, H, W)
return spatial_features
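# Worked shape example (editorial addition): with the backbones above, the tensor
# handed to this module densifies to (N, C, D, H, W) with C = 128 and D = 2, e.g.
#   spatial_features = encoded_spconv_tensor.dense()   # (4, 128, 2, 200, 176)
#   spatial_features.view(4, 128 * 2, 200, 176)        # BEV map (4, 256, 200, 176)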
class VoxelNet(VoxelBackBone8x):
def __init__(self, in_channels, out_channels, config, D=3):
self.bev_stride = 8
voxel_size = [0.1, 0.1, 0.2] # nuScenes
point_cloud_range = np.array([-51.2, -51.2, -5.0, 51.2, 51.2, 3.0], dtype=np.float32) # nuScenes
self.grid_size = ((point_cloud_range[3:] - point_cloud_range[:3]) / voxel_size).astype(int)[::-1]
        self.batch_size = config["batch_size"]
super().__init__(in_channels, self.grid_size)
self.final = spconv.SparseConv3d(
128,
out_channels // 1,
1,
stride=1,
padding=0,
bias=False,
indice_key="final",
)
self.height_compression = HeightCompression()
def forward(self, voxels, coordinates):
sp_tensor = spconv.SparseConvTensor(
features=voxels,
indices=coordinates,
spatial_shape=self.grid_size,
            batch_size=self.batch_size
)
sp_tensor = super(VoxelNet, self).forward(sp_tensor)
sp_tensor = self.final(sp_tensor)
sp_tensor = self.height_compression(sp_tensor)
return sp_tensor
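# Illustrative usage sketch (editorial addition; the channel counts and config
# values are assumptions):
#   net = VoxelNet(in_channels=4, out_channels=512, config={"batch_size": 2})
#   # voxels: (num_voxels, 4) float features; coordinates: (num_voxels, 4) int
#   # columns [batch_idx, z_idx, y_idx, x_idx], as expected by SparseConvTensor.
#   bev = net(voxels, coordinates)  # dense BEV features of shape (2, C * D, H, W)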
| 12,158 | 28.512136 | 117 |
py
|
CLIP2Scene
|
CLIP2Scene-main/model/spvcnn.py
|
import os
import pickle
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F1
import torchsparse
import torchsparse.nn as spnn
import torchsparse.nn.functional as F
from torchsparse import PointTensor, SparseTensor
from torchsparse.nn.utils import fapply, get_kernel_offsets
# from .range_utils import resample_grid_stacked
# import range_utils.nn.functional as rnf
# z: PointTensor
# return: SparseTensor
def initial_voxelize(z, init_res, after_res):
new_float_coord = torch.cat(
[(z.C[:, :3] * init_res) / after_res, z.C[:, -1].view(-1, 1)], 1)
pc_hash = F.sphash(torch.floor(new_float_coord).int())
sparse_hash = torch.unique(pc_hash)
idx_query = F.sphashquery(pc_hash, sparse_hash)
counts = F.spcount(idx_query.int(), len(sparse_hash))
inserted_coords = F.spvoxelize(torch.floor(new_float_coord), idx_query,
counts)
inserted_coords = torch.round(inserted_coords).int()
inserted_feat = F.spvoxelize(z.F, idx_query, counts)
new_tensor = SparseTensor(inserted_feat, inserted_coords, 1)
new_tensor.cmaps.setdefault(new_tensor.stride, new_tensor.coords)
z.additional_features['idx_query'][1] = idx_query
z.additional_features['counts'][1] = counts
z.C = new_float_coord
return new_tensor
# x: SparseTensor, z: PointTensor
# return: SparseTensor
def point_to_voxel(x, z):
if z.additional_features is None or z.additional_features.get(
'idx_query') is None or z.additional_features['idx_query'].get(
x.s) is None:
pc_hash = F.sphash(
torch.cat([
torch.floor(z.C[:, :3] / x.s[0]).int() * x.s[0],
z.C[:, -1].int().view(-1, 1)
], 1))
sparse_hash = F.sphash(x.C)
idx_query = F.sphashquery(pc_hash, sparse_hash)
counts = F.spcount(idx_query.int(), x.C.shape[0])
z.additional_features['idx_query'][x.s] = idx_query
z.additional_features['counts'][x.s] = counts
else:
idx_query = z.additional_features['idx_query'][x.s]
counts = z.additional_features['counts'][x.s]
inserted_feat = F.spvoxelize(z.F, idx_query, counts)
new_tensor = SparseTensor(inserted_feat, x.C, x.s)
new_tensor.cmaps = x.cmaps
new_tensor.kmaps = x.kmaps
return new_tensor
# x: SparseTensor, z: PointTensor
# return: PointTensor
def voxel_to_point(x, z, nearest=False):
if z.idx_query is None or z.weights is None or z.idx_query.get(
x.s) is None or z.weights.get(x.s) is None:
off = get_kernel_offsets(2, x.s, 1, device=z.F.device)
old_hash = F.sphash(
torch.cat([
torch.floor(z.C[:, :3] / x.s[0]).int() * x.s[0],
z.C[:, -1].int().view(-1, 1)
], 1), off)
pc_hash = F.sphash(x.C.to(z.F.device))
idx_query = F.sphashquery(old_hash, pc_hash)
weights = F.calc_ti_weights(z.C, idx_query,
scale=x.s[0]).transpose(0, 1).contiguous()
idx_query = idx_query.transpose(0, 1).contiguous()
if nearest:
weights[:, 1:] = 0.
idx_query[:, 1:] = -1
new_feat = F.spdevoxelize(x.F, idx_query, weights)
new_tensor = PointTensor(new_feat,
z.C,
idx_query=z.idx_query,
weights=z.weights)
new_tensor.additional_features = z.additional_features
new_tensor.idx_query[x.s] = idx_query
new_tensor.weights[x.s] = weights
z.idx_query[x.s] = idx_query
z.weights[x.s] = weights
else:
new_feat = F.spdevoxelize(x.F, z.idx_query.get(x.s), z.weights.get(x.s))
new_tensor = PointTensor(new_feat,
z.C,
idx_query=z.idx_query,
weights=z.weights)
new_tensor.additional_features = z.additional_features
return new_tensor
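# Editorial sketch of how these helpers fit together (mirrors SPVCNN.forward
# below; tensor names are illustrative):
#   z  = PointTensor(feats, coords.float())   # per-point features
#   x0 = initial_voxelize(z, pres, vres)      # points -> voxels at resolution `vres`
#   z0 = voxel_to_point(x0, z)                # trilinear devoxelization back to points
#   x1 = point_to_voxel(x0, z0)               # reuse the cached idx_query / counts
# with the network layers applied to the voxel tensors in between.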
save_ceph = False
if save_ceph:
from petrel_client.client import Client
ceph_client = Client()
__all__ = ['SPVCNN']
class SyncBatchNorm(nn.SyncBatchNorm):
def forward(self, input: SparseTensor) -> SparseTensor:
return fapply(input, super().forward)
class BasicConvolutionBlock(nn.Module):
def __init__(self, inc, outc, ks=3, stride=1, dilation=1):
super().__init__()
self.net = nn.Sequential(
spnn.Conv3d(inc,
outc,
kernel_size=ks,
dilation=dilation,
stride=stride),
SyncBatchNorm(outc),
spnn.ReLU(True),
)
def forward(self, x):
out = self.net(x)
return out
class BasicDeconvolutionBlock(nn.Module):
def __init__(self, inc, outc, ks=3, stride=1):
super().__init__()
self.net = nn.Sequential(
spnn.Conv3d(inc,
outc,
kernel_size=ks,
stride=stride,
transposed=True),
SyncBatchNorm(outc),
spnn.ReLU(True),
)
def forward(self, x):
return self.net(x)
class ResidualBlock(nn.Module):
expansion = 1
def __init__(self, inc, outc, ks=3, stride=1, dilation=1):
super().__init__()
self.net = nn.Sequential(
spnn.Conv3d(inc,
outc,
kernel_size=ks,
dilation=dilation,
stride=stride),
SyncBatchNorm(outc),
spnn.ReLU(True),
spnn.Conv3d(outc, outc, kernel_size=ks, dilation=dilation,
stride=1),
SyncBatchNorm(outc),
)
if inc == outc * self.expansion and stride == 1:
self.downsample = nn.Identity()
else:
self.downsample = nn.Sequential(
spnn.Conv3d(inc, outc * self.expansion, kernel_size=1, dilation=1,
stride=stride),
SyncBatchNorm(outc * self.expansion),
)
self.relu = spnn.ReLU(True)
def forward(self, x):
out = self.relu(self.net(x) + self.downsample(x))
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inc, outc, ks=3, stride=1, dilation=1):
super().__init__()
self.net = nn.Sequential(
spnn.Conv3d(inc, outc, 1, bias=False),
SyncBatchNorm(outc),
spnn.Conv3d(outc, outc, ks, stride, bias=False, dilation=dilation),
SyncBatchNorm(outc),
spnn.Conv3d(outc, outc * self.expansion, 1, bias=False),
SyncBatchNorm(outc * self.expansion)
)
if inc == outc * self.expansion and stride == 1:
self.downsample = nn.Identity()
else:
self.downsample = nn.Sequential(
spnn.Conv3d(inc, outc * self.expansion, kernel_size=1, dilation=1,
stride=stride),
SyncBatchNorm(outc * self.expansion),
)
self.relu = spnn.ReLU(True)
def forward(self, x):
out = self.relu(self.net(x) + self.downsample(x))
return out
class BaseSegmentor(nn.Module):
def __init__(self, model_cfg, num_class):
super().__init__()
self.model_cfg = model_cfg
self.num_class = num_class
# self.dataset = dataset
# self.class_names = dataset.class_names
def load_params(self, model_state_disk, strict=False):
my_model_dict = self.state_dict()
part_load = {}
for k in model_state_disk.keys():
value = model_state_disk[k]
if k.startswith("module."):
k = k[len("module."):]
if k in my_model_dict and my_model_dict[k].shape == value.shape:
part_load[k] = value
return self.load_state_dict(part_load, strict=strict)
def load_params_from_file(self, filename, logger, to_cpu=False):
if not os.path.isfile(filename):
raise FileNotFoundError
logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
loc_type = torch.device('cpu') if to_cpu else None
model_state_disk = torch.load(filename, map_location=loc_type)
if 'model_state' in model_state_disk:
model_state_disk = model_state_disk['model_state']
msg = self.load_params(model_state_disk)
logger.info(f"==> Done {msg}")
def forward(self, batch_dict):
raise NotImplementedError
class SPVCNN(nn.Module):
def _make_layer(self, block, out_channels, num_block, stride=1):
layers = []
layers.append(block(self.in_channels, out_channels, stride=stride))
self.in_channels = out_channels * block.expansion
for _ in range(1, num_block):
layers.append(block(self.in_channels, out_channels))
return layers
# (self, in_channels, out_channels, config, D=3):
# def __init__(self, model_cfg, num_class, dataset=None):
def __init__(self, in_channels, num_class, config):
super().__init__()
self.name = "spvcnn"
self.in_feature_dim = in_channels
self.num_class = num_class
self.config = config
# Default is MinkUNet50
# self.num_layer = model_cfg.get('NUM_LAYER', [2, 3, 4, 6, 2, 2, 2, 2])
# [2, 3, 4, 6, 2, 2, 2, 2]
self.num_layer = [2, 2, 2, 2, 2, 2, 2, 2]
# self.num_layer = [2, 3, 4, 6, 2, 2, 2, 2]
self.block = ResidualBlock
# self.block = {
# 'ResBlock': ResidualBlock,
# 'Bottleneck': Bottleneck,
# }[model_cfg.get('BLOCK', 'Bottleneck')]
cr = 1
# cs = model_cfg.get('PLANES', [32, 32, 64, 128, 256, 256, 128, 96, 96])
cs = [32, 32, 64, 128, 256, 256, 128, 96, 96]
cs = [int(cr * x) for x in cs]
self.pres = 0.05
self.vres = 0.05
self.stem = nn.Sequential(
spnn.Conv3d(self.in_feature_dim, cs[0], kernel_size=3, stride=1),
SyncBatchNorm(cs[0]), spnn.ReLU(True),
spnn.Conv3d(cs[0], cs[0], kernel_size=3, stride=1),
SyncBatchNorm(cs[0]), spnn.ReLU(True))
self.in_channels = cs[0]
self.stage1 = nn.Sequential(
BasicConvolutionBlock(self.in_channels, self.in_channels, ks=2, stride=2, dilation=1),
*self._make_layer(self.block, cs[1], self.num_layer[0]),
)
self.stage2 = nn.Sequential(
BasicConvolutionBlock(self.in_channels, self.in_channels, ks=2, stride=2, dilation=1),
*self._make_layer(self.block, cs[2], self.num_layer[1]),
)
self.stage3 = nn.Sequential(
BasicConvolutionBlock(self.in_channels, self.in_channels, ks=2, stride=2, dilation=1),
*self._make_layer(self.block, cs[3], self.num_layer[2]),
)
self.stage4 = nn.Sequential(
BasicConvolutionBlock(self.in_channels, self.in_channels, ks=2, stride=2, dilation=1),
*self._make_layer(self.block, cs[4], self.num_layer[3]),
)
self.up1 = [BasicDeconvolutionBlock(self.in_channels, cs[5], ks=2, stride=2)]
self.in_channels = cs[5] + cs[3] * self.block.expansion
self.up1.append(nn.Sequential(*self._make_layer(self.block, cs[5], self.num_layer[4])))
self.up1 = nn.ModuleList(self.up1)
self.up2 = [BasicDeconvolutionBlock(self.in_channels, cs[6], ks=2, stride=2)]
self.in_channels = cs[6] + cs[2] * self.block.expansion
self.up2.append(nn.Sequential(*self._make_layer(self.block, cs[6], self.num_layer[5])))
self.up2 = nn.ModuleList(self.up2)
self.up3 = [BasicDeconvolutionBlock(self.in_channels, cs[7], ks=2, stride=2)]
self.in_channels = cs[7] + cs[1] * self.block.expansion
self.up3.append(nn.Sequential(*self._make_layer(self.block, cs[7], self.num_layer[6])))
self.up3 = nn.ModuleList(self.up3)
self.up4 = [BasicDeconvolutionBlock(self.in_channels, cs[8], ks=2, stride=2)]
self.in_channels = cs[8] + cs[0]
self.up4.append(nn.Sequential(*self._make_layer(self.block, cs[8], self.num_layer[7])))
self.up4 = nn.ModuleList(self.up4)
# self.multi_scale = self.model_cfg.get('MULTI_SCALE', 'concat')
self.multi_scale = 'concat'
if self.multi_scale == 'concat':
self.classifier = nn.Sequential(nn.Linear((cs[4] + cs[6] + cs[8]) * self.block.expansion, self.num_class))
elif self.multi_scale == 'sum':
raise Exception('obsolete')
self.l1 = nn.Linear(cs[4] * self.block.expansion, cs[8] * self.block.expansion)
self.l2 = nn.Linear(cs[6] * self.block.expansion, cs[8] * self.block.expansion)
self.classifier = nn.Sequential(nn.Linear(cs[8] * self.block.expansion + (23 if self.concatattheend else 0), self.num_class))
elif self.multi_scale == 'se':
raise Exception('obsolete')
self.pool = nn.AdaptiveMaxPool1d(1)
self.attn = nn.Sequential(
nn.Linear((cs[4] + cs[6] + cs[8]) * self.block.expansion + (23 if self.concatattheend else 0), cs[8] * self.block.expansion, bias=False),
nn.ReLU(True),
nn.Linear(cs[8] * self.block.expansion, (cs[4] + cs[6] + cs[8]) * self.block.expansion + (23 if self.concatattheend else 0), bias=False),
nn.Sigmoid(),
)
self.classifier = nn.Sequential(nn.Linear((cs[4] + cs[6] + cs[8]) * self.block.expansion + (23 if self.concatattheend else 0), self.num_class))
else:
self.classifier = nn.Sequential(nn.Linear(cs[8] * self.block.expansion + (23 if self.concatattheend else 0), self.num_class))
self.point_transforms = nn.ModuleList([
nn.Sequential(
nn.Linear(cs[0], cs[4] * self.block.expansion),
nn.SyncBatchNorm(cs[4] * self.block.expansion),
nn.ReLU(True),
),
nn.Sequential(
nn.Linear(cs[4] * self.block.expansion, cs[6] * self.block.expansion),
nn.SyncBatchNorm(cs[6] * self.block.expansion),
nn.ReLU(True),
),
nn.Sequential(
nn.Linear(cs[6] * self.block.expansion, cs[8] * self.block.expansion),
nn.SyncBatchNorm(cs[8] * self.block.expansion),
nn.ReLU(True),
)
])
self.weight_initialization()
dropout_p = 0.0 #model_cfg.get('DROPOUT_P', 0.3)
self.dropout = nn.Dropout(dropout_p, True)
self.text_embeddings_path = self.config['text_embeddings_path']
text_categories = self.config['text_categories']
if self.text_embeddings_path is None:
self.text_embeddings = nn.Parameter(torch.zeros(text_categories, 512))
nn.init.normal_(self.text_embeddings, mean=0.0, std=0.01)
else:
self.register_buffer('text_embeddings', torch.randn(text_categories, 512))
loaded = torch.load(self.text_embeddings_path, map_location='cuda')
self.text_embeddings[:, :] = loaded[:, :]
self.text_embeddings = torch.cat((self.text_embeddings[0, :].unsqueeze(0)*0, self.text_embeddings), dim=0)
self.point_mapping_local = nn.Linear(480, 512)
self.point_mapping_global = nn.Linear(480, 512)
self.point_mapping_global_random = nn.Linear(480, 512)
def weight_initialization(self):
for m in self.modules():
if isinstance(m, nn.SyncBatchNorm):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# def forward(self, x):
def forward(self, batch_dict, return_logit=False, return_tta=False):
""", previous_memory=[None, None, None, None], previous_offset=None, return_memory=False):"""
x = batch_dict
z = PointTensor(x.F, x.C.float())
x0 = initial_voxelize(z, self.pres, self.vres)
x0 = self.stem(x0)
z0 = voxel_to_point(x0, z, nearest=False)
z0.F = z0.F
x1 = point_to_voxel(x0, z0)
x1 = self.stage1(x1)
x2 = self.stage2(x1)
x3 = self.stage3(x2)
x4 = self.stage4(x3)
z1 = voxel_to_point(x4, z0)
z1.F = z1.F + self.point_transforms[0](z0.F)
y1 = point_to_voxel(x4, z1)
y1.F = self.dropout(y1.F)
y1 = self.up1[0](y1)
y1 = torchsparse.cat([y1, x3])
y1 = self.up1[1](y1)
y2 = self.up2[0](y1)
y2 = torchsparse.cat([y2, x2])
y2 = self.up2[1](y2)
z2 = voxel_to_point(y2, z1)
z2.F = z2.F + self.point_transforms[1](z1.F)
y3 = point_to_voxel(y2, z2)
y3.F = self.dropout(y3.F)
y3 = self.up3[0](y3)
y3 = torchsparse.cat([y3, x1])
y3 = self.up3[1](y3)
y4 = self.up4[0](y3)
y4 = torchsparse.cat([y4, x0])
y4 = self.up4[1](y4)
z3 = voxel_to_point(y4, z2)
z3.F = z3.F + self.point_transforms[2](z2.F)
if self.multi_scale == 'concat':
feat = torch.cat([z1.F, z2.F, z3.F], dim=1)
if self.config['mode'] == 'pretrain':
point_local = self.point_mapping_local(feat)
point_global = self.point_mapping_global(feat)
return point_local, point_global
elif self.config['mode'] == 'finetune':
out = self.classifier(feat)
return out
elif self.config['mode'] == 'source_free':
feat = self.point_mapping_global(feat)
out = F1.conv1d(feat.unsqueeze(-1), self.text_embeddings[:, :, None]).squeeze()
return out
elif self.config['mode'] == 'zero_shot':
feat = self.point_mapping_global(feat)
out = F1.conv1d(feat.unsqueeze(-1), self.text_embeddings[:, :, None]).squeeze()
return out
elif self.multi_scale == 'sum':
out = self.classifier(self.l1(z1.F) + self.l2(z2.F) + z3.F)
elif self.multi_scale == 'se':
attn = torch.cat([z1.F, z2.F, z3.F], dim=1)
attn = self.pool(attn.permute(1, 0)).permute(1, 0)
attn = self.attn(attn)
out = self.classifier(torch.cat([z1.F, z2.F, z3.F], dim=1) * attn)
else:
out = self.classifier(z3.F)
return out
def forward_ensemble(self, batch_dict):
return self.forward(batch_dict, ensemble=True)
| 18,958 | 36.691849 | 155 |
py
|
CLIP2Scene
|
CLIP2Scene-main/model/vit.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmcv.cnn.bricks.transformer import FFN, MultiheadAttention
from mmcv.cnn.utils.weight_init import (constant_init, kaiming_init,
                                        trunc_normal_)
from mmcv.runner import BaseModule, ModuleList, _load_checkpoint
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.modules.utils import _pair as to_2tuple
from mmseg.ops import resize
from mmseg.utils import get_root_logger
from builder import BACKBONES
class AdaptivePadding(nn.Module):
"""Applies padding to input (if needed) so that input can get fully covered
by filter you specified. It support two modes "same" and "corner". The
"same" mode is same with "SAME" padding mode in TensorFlow, pad zero around
input. The "corner" mode would pad zero to bottom right.
Args:
kernel_size (int | tuple): Size of the kernel:
stride (int | tuple): Stride of the filter. Default: 1:
dilation (int | tuple): Spacing between kernel elements.
Default: 1.
padding (str): Support "same" and "corner", "corner" mode
would pad zero to bottom right, and "same" mode would
pad zero around input. Default: "corner".
Example:
>>> kernel_size = 16
>>> stride = 16
>>> dilation = 1
>>> input = torch.rand(1, 1, 15, 17)
>>> adap_pad = AdaptivePadding(
>>> kernel_size=kernel_size,
>>> stride=stride,
>>> dilation=dilation,
>>> padding="corner")
>>> out = adap_pad(input)
>>> assert (out.shape[2], out.shape[3]) == (16, 32)
>>> input = torch.rand(1, 1, 16, 17)
>>> out = adap_pad(input)
>>> assert (out.shape[2], out.shape[3]) == (16, 32)
"""
def __init__(self, kernel_size=1, stride=1, dilation=1, padding='corner'):
super(AdaptivePadding, self).__init__()
assert padding in ('same', 'corner')
kernel_size = to_2tuple(kernel_size)
stride = to_2tuple(stride)
dilation = to_2tuple(dilation)
self.padding = padding
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
def get_pad_shape(self, input_shape):
input_h, input_w = input_shape
kernel_h, kernel_w = self.kernel_size
stride_h, stride_w = self.stride
output_h = math.ceil(input_h / stride_h)
output_w = math.ceil(input_w / stride_w)
pad_h = max((output_h - 1) * stride_h +
(kernel_h - 1) * self.dilation[0] + 1 - input_h, 0)
pad_w = max((output_w - 1) * stride_w +
(kernel_w - 1) * self.dilation[1] + 1 - input_w, 0)
return pad_h, pad_w
def forward(self, x):
pad_h, pad_w = self.get_pad_shape(x.size()[-2:])
if pad_h > 0 or pad_w > 0:
if self.padding == 'corner':
x = F.pad(x, [0, pad_w, 0, pad_h])
elif self.padding == 'same':
x = F.pad(x, [
pad_w // 2, pad_w - pad_w // 2, pad_h // 2,
pad_h - pad_h // 2
])
return x
class PatchEmbed(BaseModule):
"""Image to Patch Embedding.
We use a conv layer to implement PatchEmbed.
Args:
in_channels (int): The num of input channels. Default: 3
embed_dims (int): The dimensions of embedding. Default: 768
conv_type (str): The config dict for embedding
conv layer type selection. Default: "Conv2d".
kernel_size (int): The kernel_size of embedding conv. Default: 16.
stride (int, optional): The slide stride of embedding conv.
Default: None (Would be set as `kernel_size`).
padding (int | tuple | string ): The padding length of
embedding conv. When it is a string, it means the mode
            of adaptive padding; "same" and "corner" are supported.
Default: "corner".
dilation (int): The dilation rate of embedding conv. Default: 1.
bias (bool): Bias of embed conv. Default: True.
norm_cfg (dict, optional): Config dict for normalization layer.
Default: None.
input_size (int | tuple | None): The size of input, which will be
            used to calculate the output size. Only works when `dynamic_size`
is False. Default: None.
init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization.
Default: None.
"""
def __init__(self,
in_channels=3,
embed_dims=768,
conv_type='Conv2d',
kernel_size=16,
stride=None,
padding='corner',
dilation=1,
bias=True,
norm_cfg=None,
input_size=None,
init_cfg=None):
super(PatchEmbed, self).__init__(init_cfg=init_cfg)
self.embed_dims = embed_dims
if stride is None:
stride = kernel_size
kernel_size = to_2tuple(kernel_size)
stride = to_2tuple(stride)
dilation = to_2tuple(dilation)
if isinstance(padding, str):
self.adap_padding = AdaptivePadding(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding)
# disable the padding of conv
padding = 0
else:
self.adap_padding = None
padding = to_2tuple(padding)
self.projection = build_conv_layer(
dict(type=conv_type),
in_channels=in_channels,
out_channels=embed_dims,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
if norm_cfg is not None:
self.norm = build_norm_layer(norm_cfg, embed_dims)[1]
else:
self.norm = None
if input_size:
input_size = to_2tuple(input_size)
# `init_out_size` would be used outside to
# calculate the num_patches
# when `use_abs_pos_embed` outside
self.init_input_size = input_size
if self.adap_padding:
pad_h, pad_w = self.adap_padding.get_pad_shape(input_size)
input_h, input_w = input_size
input_h = input_h + pad_h
input_w = input_w + pad_w
input_size = (input_h, input_w)
# https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html
h_out = (input_size[0] + 2 * padding[0] - dilation[0] *
(kernel_size[0] - 1) - 1) // stride[0] + 1
w_out = (input_size[1] + 2 * padding[1] - dilation[1] *
(kernel_size[1] - 1) - 1) // stride[1] + 1
self.init_out_size = (h_out, w_out)
else:
self.init_input_size = None
self.init_out_size = None
def forward(self, x):
"""
Args:
x (Tensor): Has shape (B, C, H, W). In most case, C is 3.
Returns:
tuple: Contains merged results and its spatial shape.
- x (Tensor): Has shape (B, out_h * out_w, embed_dims)
- out_size (tuple[int]): Spatial shape of x, arrange as
(out_h, out_w).
"""
if self.adap_padding:
x = self.adap_padding(x)
x = self.projection(x)
out_size = (x.shape[2], x.shape[3])
x = x.flatten(2).transpose(1, 2)
if self.norm is not None:
x = self.norm(x)
return x, out_size
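# Worked shape example (editorial addition): with the defaults used by
# VisionTransformer below (224x224 input, 16x16 patches, embed_dims=768),
#   x, out_size = patch_embed(torch.rand(2, 3, 224, 224))
# gives x of shape (2, 196, 768) and out_size == (14, 14).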
class TransformerEncoderLayer(BaseModule):
"""Implements one encoder layer in Vision Transformer.
Args:
embed_dims (int): The feature dimension.
num_heads (int): Parallel attention heads.
feedforward_channels (int): The hidden dimension for FFNs.
drop_rate (float): Probability of an element to be zeroed
after the feed forward layer. Default: 0.0.
attn_drop_rate (float): The drop out rate for attention layer.
Default: 0.0.
drop_path_rate (float): stochastic depth rate. Default 0.0.
num_fcs (int): The number of fully-connected layers for FFNs.
Default: 2.
qkv_bias (bool): enable bias for qkv if True. Default: True
act_cfg (dict): The activation config for FFNs.
Default: dict(type='GELU').
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='LN').
batch_first (bool): Key, Query and Value are shape of
(batch, n, embed_dim)
or (n, batch, embed_dim). Default: True.
"""
def __init__(self,
embed_dims,
num_heads,
feedforward_channels,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
num_fcs=2,
qkv_bias=True,
act_cfg=dict(type='GELU'),
norm_cfg=dict(type='LN'),
batch_first=True):
super(TransformerEncoderLayer, self).__init__()
self.norm1_name, norm1 = build_norm_layer(
norm_cfg, embed_dims, postfix=1)
self.add_module(self.norm1_name, norm1)
self.attn = MultiheadAttention(
embed_dims=embed_dims,
num_heads=num_heads,
attn_drop=attn_drop_rate,
proj_drop=drop_rate,
dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),
batch_first=batch_first,
bias=qkv_bias)
self.norm2_name, norm2 = build_norm_layer(
norm_cfg, embed_dims, postfix=2)
self.add_module(self.norm2_name, norm2)
self.ffn = FFN(
embed_dims=embed_dims,
feedforward_channels=feedforward_channels,
num_fcs=num_fcs,
ffn_drop=drop_rate,
dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),
act_cfg=act_cfg)
@property
def norm1(self):
return getattr(self, self.norm1_name)
@property
def norm2(self):
return getattr(self, self.norm2_name)
def forward(self, x, return_qkv=False):
q, k, v = None, None, None
if return_qkv:
y = self.norm1(x)
y = F.linear(y, self.attn.attn.in_proj_weight, self.attn.attn.in_proj_bias)
N, L, C = y.shape
y = y.view(N, L, 3, C // 3).permute(2, 0, 1, 3).reshape(3 * N, L, C // 3)
y = F.linear(y, self.attn.attn.out_proj.weight, self.attn.attn.out_proj.bias)
q, k, v = y.tensor_split(3, dim=0)
v += x
v = self.ffn(self.norm2(v), identity=v)
x = self.attn(self.norm1(x), identity=x)
x = self.ffn(self.norm2(x), identity=x)
return x, q, k, v
@BACKBONES.register_module()
class VisionTransformer(BaseModule):
"""Vision Transformer.
This backbone is the implementation of `An Image is Worth 16x16 Words:
Transformers for Image Recognition at
Scale <https://arxiv.org/abs/2010.11929>`_.
Args:
img_size (int | tuple): Input image size. Default: 224.
patch_size (int): The patch size. Default: 16.
in_channels (int): Number of input channels. Default: 3.
embed_dims (int): embedding dimension. Default: 768.
num_layers (int): depth of transformer. Default: 12.
num_heads (int): number of attention heads. Default: 12.
mlp_ratio (int): ratio of mlp hidden dim to embedding dim.
Default: 4.
out_indices (list | tuple | int): Output from which stages.
Default: -1.
qkv_bias (bool): enable bias for qkv if True. Default: True.
drop_rate (float): Probability of an element to be zeroed.
Default 0.0
attn_drop_rate (float): The drop out rate for attention layer.
Default 0.0
drop_path_rate (float): stochastic depth rate. Default 0.0
        with_cls_token (bool): Whether to concatenate the class token with the
            image tokens as transformer input. Default: True.
output_cls_token (bool): Whether output the cls_token. If set True,
`with_cls_token` must be True. Default: False.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='LN')
act_cfg (dict): The activation config for FFNs.
Default: dict(type='GELU').
patch_norm (bool): Whether to add a norm in PatchEmbed Block.
Default: False.
        final_norm (bool): Whether to add an additional layer to normalize the
            final feature map. Default: False.
        interpolate_mode (str): Select the interpolation mode for resizing the
            position embedding vector. Default: bicubic.
num_fcs (int): The number of fully-connected layers for FFNs.
Default: 2.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save
some memory while slowing down the training speed. Default: False.
pretrained (str, optional): model pretrained path. Default: None.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None.
"""
def __init__(self,
img_size=224,
patch_size=16,
patch_bias=True,
in_channels=3,
embed_dims=768,
num_layers=12,
num_heads=12,
mlp_ratio=4,
out_indices=-1,
qkv_bias=True,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
with_cls_token=True,
output_cls_token=False,
norm_cfg=dict(type='LN'),
act_cfg=dict(type='GELU'),
patch_norm=False,
pre_norm=False,
final_norm=False,
return_qkv=False,
skip_last_attn=False,
interpolate_mode='bicubic',
num_fcs=2,
norm_eval=False,
with_cp=False,
pretrained=None,
init_cfg=None):
super(VisionTransformer, self).__init__(init_cfg=init_cfg)
if isinstance(img_size, int):
img_size = to_2tuple(img_size)
elif isinstance(img_size, tuple):
if len(img_size) == 1:
img_size = to_2tuple(img_size[0])
assert len(img_size) == 2, \
f'The size of image should have length 1 or 2, ' \
f'but got {len(img_size)}'
if output_cls_token:
            assert with_cls_token is True, f'with_cls_token must be True if ' \
                f'output_cls_token is set to True, but got {with_cls_token}'
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be set at the same time'
if isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is not None:
raise TypeError('pretrained must be a str or None')
self.img_size = img_size
self.patch_size = patch_size
self.interpolate_mode = interpolate_mode
self.norm_eval = norm_eval
self.with_cp = with_cp
self.pretrained = pretrained
self.patch_embed = PatchEmbed(
in_channels=in_channels,
embed_dims=embed_dims,
conv_type='Conv2d',
kernel_size=patch_size,
stride=patch_size,
padding='corner',
bias=patch_bias,
norm_cfg=norm_cfg if patch_norm else None,
init_cfg=None,
)
num_patches = (img_size[0] // patch_size) * \
(img_size[1] // patch_size)
self.with_cls_token = with_cls_token
self.output_cls_token = output_cls_token
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dims))
self.pos_embed = nn.Parameter(
torch.zeros(1, num_patches + 1, embed_dims))
self.drop_after_pos = nn.Dropout(p=drop_rate)
if isinstance(out_indices, int):
if out_indices == -1:
out_indices = num_layers - 1
self.out_indices = [out_indices]
elif isinstance(out_indices, list) or isinstance(out_indices, tuple):
self.out_indices = out_indices
else:
raise TypeError('out_indices must be type of int, list or tuple')
dpr = [
x.item() for x in torch.linspace(0, drop_path_rate, num_layers)
] # stochastic depth decay rule
self.layers = ModuleList()
for i in range(num_layers):
self.layers.append(
TransformerEncoderLayer(
embed_dims=embed_dims,
num_heads=num_heads,
feedforward_channels=mlp_ratio * embed_dims,
attn_drop_rate=attn_drop_rate,
drop_rate=drop_rate,
drop_path_rate=dpr[i],
num_fcs=num_fcs,
qkv_bias=qkv_bias,
act_cfg=act_cfg,
norm_cfg=norm_cfg,
batch_first=True))
self.pre_norm = pre_norm
if pre_norm:
self.norm0_name, norm0 = build_norm_layer(
norm_cfg, embed_dims, postfix=0)
self.add_module(self.norm0_name, norm0)
self.final_norm = final_norm
if final_norm:
self.norm1_name, norm1 = build_norm_layer(
norm_cfg, embed_dims, postfix=1)
self.add_module(self.norm1_name, norm1)
self.return_qkv = [False] * num_layers
if isinstance(return_qkv, bool):
for out_i in self.out_indices:
self.return_qkv[out_i] = return_qkv
elif isinstance(return_qkv, list) or isinstance(return_qkv, tuple):
for i, out_i in enumerate(self.out_indices):
self.return_qkv[out_i] = return_qkv[i]
else:
raise TypeError('return_qkv must be type of bool, list or tuple')
self.skip_last_attn = skip_last_attn
@property
def norm0(self):
return getattr(self, self.norm0_name)
@property
def norm1(self):
return getattr(self, self.norm1_name)
def init_weights(self):
if (isinstance(self.init_cfg, dict)
and self.init_cfg.get('type') == 'Pretrained'):
logger = get_root_logger()
checkpoint = _load_checkpoint(
self.init_cfg['checkpoint'], logger=logger, map_location='cpu')
if 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
state_dict = checkpoint
if 'pos_embed' in state_dict.keys():
if self.pos_embed.shape != state_dict['pos_embed'].shape:
logger.info(msg=f'Resize the pos_embed shape from '
f'{state_dict["pos_embed"].shape} to '
f'{self.pos_embed.shape}')
h, w = self.img_size
pos_size = int(
math.sqrt(state_dict['pos_embed'].shape[1] - 1))
state_dict['pos_embed'] = self.resize_pos_embed(
state_dict['pos_embed'],
(h // self.patch_size, w // self.patch_size),
(pos_size, pos_size), self.interpolate_mode)
print(self.load_state_dict(state_dict, False))
elif self.init_cfg is not None:
super(VisionTransformer, self).init_weights()
else:
# We only implement the 'jax_impl' initialization implemented at
# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py#L353 # noqa: E501
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
for n, m in self.named_modules():
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if m.bias is not None:
if 'ffn' in n:
nn.init.normal_(m.bias, mean=0., std=1e-6)
else:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv2d):
kaiming_init(m, mode='fan_in', bias=0.)
elif isinstance(m, (_BatchNorm, nn.GroupNorm, nn.LayerNorm)):
constant_init(m, val=1.0, bias=0.)
    def _pos_embedding(self, patched_img, hw_shape, pos_embed):
        """Position embedding method.
        Resize the pos_embed if the input image size doesn't match
        the training size.
Args:
patched_img (torch.Tensor): The patched image, it should be
shape of [B, L1, C].
hw_shape (tuple): The downsampled image resolution.
            pos_embed (torch.Tensor): The pos_embed weights, it should be
shape of [B, L2, c].
Return:
torch.Tensor: The pos encoded image feature.
"""
assert patched_img.ndim == 3 and pos_embed.ndim == 3, \
'the shapes of patched_img and pos_embed must be [B, L, C]'
x_len, pos_len = patched_img.shape[1], pos_embed.shape[1]
if x_len != pos_len:
if pos_len == (self.img_size[0] // self.patch_size) * (
self.img_size[1] // self.patch_size) + 1:
pos_h = self.img_size[0] // self.patch_size
pos_w = self.img_size[1] // self.patch_size
else:
raise ValueError(
'Unexpected shape of pos_embed, got {}.'.format(
pos_embed.shape))
pos_embed = self.resize_pos_embed(pos_embed, hw_shape,
(pos_h, pos_w),
self.interpolate_mode)
return self.drop_after_pos(patched_img + pos_embed)
@staticmethod
    def resize_pos_embed(pos_embed, input_shape, pos_shape, mode):
"""Resize pos_embed weights.
Resize pos_embed using bicubic interpolate method.
Args:
pos_embed (torch.Tensor): Position embedding weights.
            input_shape (tuple): Tuple for (downsampled input image height,
downsampled input image width).
pos_shape (tuple): The resolution of downsampled origin training
image.
mode (str): Algorithm used for upsampling:
``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` |
``'trilinear'``. Default: ``'nearest'``
Return:
torch.Tensor: The resized pos_embed of shape [B, L_new, C]
"""
assert pos_embed.ndim == 3, 'shape of pos_embed must be [B, L, C]'
pos_h, pos_w = pos_shape
cls_token_weight = pos_embed[:, 0]
pos_embed_weight = pos_embed[:, (-1 * pos_h * pos_w):]
pos_embed_weight = pos_embed_weight.reshape(
1, pos_h, pos_w, pos_embed.shape[2]).permute(0, 3, 1, 2)
pos_embed_weight = resize(
            pos_embed_weight, size=input_shape, align_corners=False, mode=mode)
cls_token_weight = cls_token_weight.unsqueeze(1)
pos_embed_weight = torch.flatten(pos_embed_weight, 2).transpose(1, 2)
pos_embed = torch.cat((cls_token_weight, pos_embed_weight), dim=1)
return pos_embed
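    # Worked example (editorial addition): a ViT-B/16 checkpoint trained at
    # 224x224 stores pos_embed of shape (1, 1 + 14*14, 768). To run on 512x512
    # inputs the patch grid becomes 32x32, so
    #   new_pe = VisionTransformer.resize_pos_embed(pos_embed, (32, 32), (14, 14), 'bicubic')
    # yields new_pe of shape (1, 1 + 32*32, 768), with the cls-token entry kept
    # unchanged and only the patch-grid part interpolated.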
def forward(self, inputs):
B = inputs.shape[0]
x, hw_shape = self.patch_embed(inputs)
# stole cls_tokens impl from Phil Wang, thanks
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
        x = self._pos_embedding(x, hw_shape, self.pos_embed)
if not self.with_cls_token:
# Remove class token for transformer encoder input
x = x[:, 1:]
if self.pre_norm:
x = self.norm0(x)
outs = []
for i, layer in enumerate(self.layers):
x, q, k, v = layer(x, self.return_qkv[i] \
or (i == len(self.layers) - 1 and self.skip_last_attn))
if i == len(self.layers) - 1:
if self.final_norm:
x = self.norm1(x)
if self.return_qkv[i]:
v = self.norm1(v)
if self.skip_last_attn:
if self.with_cls_token:
x[:, 1:] = v[:, 1:]
else:
x = v
if i in self.out_indices:
if self.with_cls_token:
# Remove class token and reshape token for decoder head
out = x[:, 1:]
else:
out = x
B, _, C = out.shape
out = out.reshape(B, hw_shape[0], hw_shape[1],
C).permute(0, 3, 1, 2).contiguous()
if self.output_cls_token:
out = [out, x[:, 0]]
if self.return_qkv[i]:
if self.with_cls_token:
q = q[:, 1:]
k = k[:, 1:]
v = v[:, 1:]
v = v.reshape(B, hw_shape[0], hw_shape[1],
C).permute(0, 3, 1, 2).contiguous()
out = [out, q, k, v]
outs.append(out)
return tuple(outs)
def train(self, mode=True):
super(VisionTransformer, self).train(mode)
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, nn.LayerNorm):
m.eval()
| 26,623 | 39.03609 | 128 |
py
|
CLIP2Scene
|
CLIP2Scene-main/model/minkunet.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# https://arxiv.org/abs/2007.10985
from model.resnet import ResNetBase, get_norm
from model.modules.common import ConvType, NormType, conv, conv_tr
from model.modules.resnet_block import BasicBlock, Bottleneck
from MinkowskiEngine import MinkowskiReLU
from MinkowskiEngine import SparseTensor
import MinkowskiEngine.MinkowskiOps as me
# import torchsparse.nn.functional as F
from torch.nn import functional as F
import torch
import torch.nn as nn
class MinkUNetBase(ResNetBase):
BLOCK = None
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
DILATIONS = (1, 1, 1, 1, 1, 1, 1, 1)
LAYERS = (1, 1, 1, 1, 1, 1, 1, 1)
INIT_DIM = 32
OUT_PIXEL_DIST = 1
NORM_TYPE = NormType.BATCH_NORM
NON_BLOCK_CONV_TYPE = ConvType.SPATIAL_HYPERCUBE
# CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE # FOR ME0.5
def __init__(self, in_channels, out_channels, config, D=3):
self.normalize_feature = config["normalize_features"]
super(MinkUNetBase, self).__init__(in_channels, out_channels, config, D)
def network_initialization(self, in_channels, out_channels, config, D):
dilations = self.DILATIONS
bn_momentum = config["bn_momentum"]
def space_n_time_m(n, m):
return n if D == 3 else [n, n, n, m]
if D == 4:
self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)
self.inplanes = self.INIT_DIM
self.conv0p1s1 = conv(
in_channels,
self.inplanes,
kernel_size=space_n_time_m(config["kernel_size"], 1),
stride=1,
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D,
)
self.bn0 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.conv1p1s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D,
)
self.bn1 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block1 = self._make_layer(
self.BLOCK,
self.PLANES[0],
self.LAYERS[0],
dilation=dilations[0],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum,
)
self.conv2p2s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D,
)
self.bn2 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block2 = self._make_layer(
self.BLOCK,
self.PLANES[1],
self.LAYERS[1],
dilation=dilations[1],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum,
)
self.conv3p4s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D,
)
self.bn3 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block3 = self._make_layer(
self.BLOCK,
self.PLANES[2],
self.LAYERS[2],
dilation=dilations[2],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum,
)
self.conv4p8s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D,
)
self.bn4 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block4 = self._make_layer(
self.BLOCK,
self.PLANES[3],
self.LAYERS[3],
dilation=dilations[3],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum,
)
self.convtr4p16s2 = conv_tr(
self.inplanes,
self.PLANES[4],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D,
)
self.bntr4 = get_norm(
self.NORM_TYPE, self.PLANES[4], D, bn_momentum=bn_momentum
)
self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion
self.block5 = self._make_layer(
self.BLOCK,
self.PLANES[4],
self.LAYERS[4],
dilation=dilations[4],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum,
)
self.convtr5p8s2 = conv_tr(
self.inplanes,
self.PLANES[5],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D,
)
self.bntr5 = get_norm(
self.NORM_TYPE, self.PLANES[5], D, bn_momentum=bn_momentum
)
self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion
self.block6 = self._make_layer(
self.BLOCK,
self.PLANES[5],
self.LAYERS[5],
dilation=dilations[5],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum,
)
self.convtr6p4s2 = conv_tr(
self.inplanes,
self.PLANES[6],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D,
)
self.bntr6 = get_norm(
self.NORM_TYPE, self.PLANES[6], D, bn_momentum=bn_momentum
)
self.inplanes = self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion
self.block7 = self._make_layer(
self.BLOCK,
self.PLANES[6],
self.LAYERS[6],
dilation=dilations[6],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum,
)
self.convtr7p2s2 = conv_tr(
self.inplanes,
self.PLANES[7],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D,
)
self.bntr7 = get_norm(
self.NORM_TYPE, self.PLANES[7], D, bn_momentum=bn_momentum
)
self.inplanes = self.PLANES[7] + self.INIT_DIM
self.block8 = self._make_layer(
self.BLOCK,
self.PLANES[7],
self.LAYERS[7],
dilation=dilations[7],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum,
)
self.final = conv(
self.PLANES[7], 512, kernel_size=1, stride=1, bias=True, D=D
)
self.relu = MinkowskiReLU(inplace=True)
self.text_embeddings_path = self.config['text_embeddings_path']
text_categories = self.config['text_categories']
if self.text_embeddings_path is None:
self.text_embeddings = nn.Parameter(torch.zeros(text_categories, 512))
nn.init.normal_(self.text_embeddings, mean=0.0, std=0.01)
else:
self.register_buffer('text_embeddings', torch.randn(text_categories, 512))
loaded = torch.load(self.text_embeddings_path, map_location='cuda')
self.text_embeddings[:, :] = loaded[:, :]
self.text_embeddings = torch.cat((self.text_embeddings[0, :].unsqueeze(0)*0, self.text_embeddings), dim=0)
self.local_feature = conv(
self.PLANES[7], 512, kernel_size=1, stride=1, bias=True, D=D
)
self.classifier = conv(
self.PLANES[7], out_channels, kernel_size=1, stride=1, bias=True, D=D
)
def forward(self, x):
out = self.conv0p1s1(x)
out = self.bn0(out)
out_p1 = self.relu(out)
out = self.conv1p1s2(out_p1)
out = self.bn1(out)
out = self.relu(out)
out_b1p2 = self.block1(out)
out = self.conv2p2s2(out_b1p2)
out = self.bn2(out)
out = self.relu(out)
out_b2p4 = self.block2(out)
out = self.conv3p4s2(out_b2p4)
out = self.bn3(out)
out = self.relu(out)
out_b3p8 = self.block3(out)
out = self.conv4p8s2(out_b3p8)
out = self.bn4(out)
out = self.relu(out)
encoder_out = self.block4(out)
out = self.convtr4p16s2(encoder_out)
out = self.bntr4(out)
out = self.relu(out)
out = me.cat(out, out_b3p8)
out = self.block5(out)
out = self.convtr5p8s2(out)
out = self.bntr5(out)
out = self.relu(out)
out = me.cat(out, out_b2p4)
out = self.block6(out)
out = self.convtr6p4s2(out)
out = self.bntr6(out)
out = self.relu(out)
out = me.cat(out, out_b1p2)
out = self.block7(out)
out = self.convtr7p2s2(out)
out = self.bntr7(out)
out = self.relu(out)
out = me.cat(out, out_p1)
feats = self.block8(out)
# out = self.final(out)
if self.config['mode'] == 'pretrain':
out = self.final(feats)
local_feature = self.local_feature(feats)
return out.F, local_feature.F
elif self.config['mode'] == 'finetune':
out = self.classifier(feats)
return out.F
elif self.config['mode'] == 'source_free':
feat = self.final(feats)
out = F.conv1d(feat.F.unsqueeze(-1), self.text_embeddings[:, :, None]).squeeze()
return out
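# Editorial note: in 'source_free' mode the conv1d above scores every point
# against the stacked text embeddings, e.g. per-point features of shape
# (num_points, 512, 1) convolved with embeddings of shape (num_classes, 512, 1)
# give logits of shape (num_points, num_classes).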
class MinkUNet14(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (1, 1, 1, 1, 1, 1, 1, 1)
class MinkUNet18(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
class MinkUNet34(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class MinkUNet50(MinkUNetBase):
BLOCK = Bottleneck
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class MinkUNet101(MinkUNetBase):
BLOCK = Bottleneck
LAYERS = (2, 3, 4, 23, 2, 2, 2, 2)
class MinkUNet14A(MinkUNet14):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class MinkUNet14B(MinkUNet14):
PLANES = (32, 64, 128, 256, 128, 128, 128, 128)
class MinkUNet14C(MinkUNet14):
PLANES = (32, 64, 128, 256, 192, 192, 128, 128)
class MinkUNet14D(MinkUNet14):
PLANES = (32, 64, 128, 256, 384, 384, 384, 384)
class MinkUNet18A(MinkUNet18):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class MinkUNet18B(MinkUNet18):
PLANES = (32, 64, 128, 256, 128, 128, 128, 128)
class MinkUNet18D(MinkUNet18):
PLANES = (32, 64, 128, 256, 384, 384, 384, 384)
class MinkUNet34A(MinkUNet34):
PLANES = (32, 64, 128, 256, 256, 128, 64, 64)
class MinkUNet34B(MinkUNet34):
PLANES = (32, 64, 128, 256, 256, 128, 64, 32)
class MinkUNet34C(MinkUNet34):
PLANES = (32, 64, 128, 256, 256, 128, 96, 96)
| 11,742 | 28.804569 | 114 |
py
|
CLIP2Scene
|
CLIP2Scene-main/model/__init__.py
|
from model.image_model import *
from model.fusionNet import *
from model.maskclip_model import *
# from model.clip_model import *
try:
from model.spvcnn import SPVCNN as SPVCNN
except ImportError:
SPVCNN = None
try:
from model.minkunet import MinkUNet14A as MinkUNet
except ImportError:
MinkUNet = None
# try:
# from model.spconv_backbone import VoxelNet
# except ImportError:
# VoxelNet = None
| 422 | 20.15 | 54 |
py
|
CLIP2Scene
|
CLIP2Scene-main/model/builder.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from mmcv.cnn import MODELS as MMCV_MODELS
from mmcv.cnn.bricks.registry import ATTENTION as MMCV_ATTENTION
from mmcv.utils import Registry
MODELS = Registry('models', parent=MMCV_MODELS)
ATTENTION = Registry('attention', parent=MMCV_ATTENTION)
BACKBONES = MODELS
NECKS = MODELS
HEADS = MODELS
LOSSES = MODELS
SEGMENTORS = MODELS
def build_backbone(cfg):
"""Build backbone."""
return BACKBONES.build(cfg)
def build_neck(cfg):
"""Build neck."""
return NECKS.build(cfg)
def build_head(cfg):
"""Build head."""
return HEADS.build(cfg)
def build_loss(cfg):
"""Build loss."""
return LOSSES.build(cfg)
def build_segmentor(cfg, train_cfg=None, test_cfg=None):
"""Build segmentor."""
if train_cfg is not None or test_cfg is not None:
        warnings.warn(
            'train_cfg and test_cfg are deprecated, '
            'please specify them in model', UserWarning)
assert cfg.get('train_cfg') is None or train_cfg is None, \
'train_cfg specified in both outer field and model field '
assert cfg.get('test_cfg') is None or test_cfg is None, \
'test_cfg specified in both outer field and model field '
return SEGMENTORS.build(
cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg))
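# Illustrative usage sketch (editorial addition): any class decorated with
# @BACKBONES.register_module() -- e.g. VisionTransformer in model/vit.py -- can
# be built from a plain config dict, provided its module has been imported so
# the registration has run:
#   cfg = dict(type='VisionTransformer', img_size=224, patch_size=16)
#   backbone = build_backbone(cfg)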
| 1,336 | 25.215686 | 71 |
py
|
CLIP2Scene
|
CLIP2Scene-main/model/maskclip_model.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.utils import print_log
from mmcv.cnn.bricks.transformer import FFN, MultiheadAttention
from mmcv.runner import BaseModule, ModuleList, _load_checkpoint
from torch.nn.modules.utils import _pair as to_2tuple
from mmseg.ops import resize
from mmseg.utils import get_root_logger
import math
import torch.nn.functional as F
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmcv.utils import to_2tuple
import torch.nn as nn
import warnings
from collections import OrderedDict
import mmcv
import numpy as np
import torch
import re
def load_checkpoint1(model_load_path, model):
my_model_dict = model.state_dict()
pre_weight = torch.load(model_load_path, map_location='cpu')['state_dict']
revise_keys = [(r'^backbone\.', '')]
for p, r in revise_keys:
pre_weight = OrderedDict(
{re.sub(p, r, k): v
for k, v in pre_weight.items()})
part_load = {}
match_size = 0
nomatch_size = 0
for k in pre_weight.keys():
value = pre_weight[k]
if k in my_model_dict and my_model_dict[k].shape == value.shape:
match_size += 1
part_load[k] = value
else:
print("missed keys: ", k)
nomatch_size += 1
print("matched parameter sets: {}, and no matched: {}".format(match_size, nomatch_size))
my_model_dict.update(part_load)
model.load_state_dict(my_model_dict)
return model
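# Illustrative usage sketch (editorial addition; the checkpoint path is a
# placeholder). Only keys whose names (after stripping a leading 'backbone.')
# and shapes match the current model are loaded; everything else is reported
# as missing:
#   model = load_checkpoint1('path/to/maskclip_checkpoint.pth', model)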
class MaskClipHead(nn.Module):
def __init__(self,
text_embeddings_path='/mnt/lustre/chenrunnan/projects/MaskCLIP/pretrain/nuscenes_ViT16_clip_text.pth',
visual_projs_path='/mnt/lustre/chenrunnan/projects/MaskCLIP/pretrain/ViT16_clip_weights.pth',
channels=0,
num_classes=16,
in_channels=768,
dropout_ratio=0,
conv_cfg=None,
norm_cfg=dict(type='SyncBN', requires_grad=True),
act_cfg=dict(type='ReLU'),
in_index=-1,
input_transform=None,
ignore_index=255,
align_corners=False,
freeze=False,
text_categories=16,
text_channels=512,
vit=True,
ks_thresh=1,
pd_thresh=0.5,
attn_pooling=False,
num_heads=32,
**kwargs):
super(MaskClipHead, self).__init__(**kwargs)
self.in_channels = in_channels
self.input_transform = input_transform
self.channels = channels
self.num_classes = num_classes
self.dropout_ratio = dropout_ratio
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.in_index = in_index
self.ignore_index = ignore_index
self.align_corners = align_corners
if channels > 0:
self.conv_seg = nn.Conv2d(channels, num_classes, kernel_size=1)
if dropout_ratio > 0:
self.dropout = nn.Dropout2d(dropout_ratio)
else:
self.dropout = None
self.fp16_enabled = False
self.freeze = freeze
self.text_categories = text_categories
self.text_channels = text_channels
self.text_embeddings_path = text_embeddings_path
self.visual_projs_path = visual_projs_path
if self.text_embeddings_path is None:
self.text_embeddings = nn.Parameter(torch.zeros(text_categories, text_channels))
nn.init.normal_(self.text_embeddings, mean=0.0, std=0.01)
else:
self.register_buffer('text_embeddings', torch.randn(text_categories, text_channels))
self.load_text_embeddings()
self.vit = vit
if vit:
self.proj = nn.Conv2d(self.in_channels, text_channels, 1, bias=False)
else:
self.q_proj = nn.Conv2d(self.in_channels, self.in_channels, 1)
self.k_proj = nn.Conv2d(self.in_channels, self.in_channels, 1)
self.v_proj = nn.Conv2d(self.in_channels, self.in_channels, 1)
self.c_proj = nn.Conv2d(self.in_channels, text_channels, 1)
self.load_visual_projs()
self.ks_thresh = ks_thresh
self.pd_thresh = pd_thresh
self.attn_pooling = attn_pooling
self.num_heads = num_heads
self.image_mapping_local = nn.Conv2d(self.in_channels, 512, 1)
def load_text_embeddings(self):
loaded = torch.load(self.text_embeddings_path, map_location='cuda')
self.text_embeddings[:, :] = loaded[:, :]
print_log(f'Loaded text embeddings from {self.text_embeddings_path}', logger=get_root_logger())
def load_visual_projs(self):
loaded = torch.load(self.visual_projs_path, map_location='cuda')
attrs = ['proj'] if self.vit else ['q_proj', 'k_proj', 'v_proj', 'c_proj']
for attr in attrs:
current_attr = getattr(self, attr)
state_dict = loaded[attr]
for key in state_dict:
if 'weight' in key:
state_dict[key] = state_dict[key][:, :, None, None]
current_attr.load_state_dict(state_dict)
print("attrs", attrs)
print_log(f'Loaded proj weights from {self.visual_projs_path}', logger=get_root_logger())
def _init_inputs(self, in_channels, in_index, input_transform):
pass
def _transform_inputs(self, inputs):
pass
# def forward(self, inputs, img_metas, test_cfg):
def forward(self, inputs):
# x = self._transform_inputs(inputs)
x = inputs[self.in_index]
q, k, v, cls_token = None, None, None, None
image_local = None  # ensures the value returned below is defined even when no per-patch v features are produced
if self.vit:
if isinstance(x, list) and len(x) == 4:
x, q, k, v = x
if isinstance(x, list) and len(x) == 2:
x, cls_token = x
if v is not None:
feat = self.proj(v)
image_local = self.image_mapping_local(v)
else:
feat = self.proj(x)
if cls_token is not None:
cls_token = self.proj(cls_token[:, :, None, None])[:, :, 0, 0]
else:
if self.attn_pooling:
N, C, H, W = x.shape
x = x.view(N, C, -1).permute(2, 0, 1) # NCHW -> (HW)NC
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)
x, _ = F.multi_head_attention_forward(
query=x, key=x, value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight[:, :, 0, 0],
k_proj_weight=self.k_proj.weight[:, :, 0, 0],
v_proj_weight=self.v_proj.weight[:, :, 0, 0],
in_proj_weight=None,
in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight[:, :, 0, 0],
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False
)
feat = x[1:].permute(1, 2, 0).view(N, -1, H, W)
else:
q = self.q_proj(x)
k = self.k_proj(x)
q = torch.flatten(q, start_dim=2).transpose(-2, -1)
k = torch.flatten(k, start_dim=2).transpose(-2, -1)
v = self.v_proj(x)
feat = self.c_proj(v)
output = self.cls_seg(feat)
# if not self.training:
# output = self.refine_output(output, k)
return image_local, output
def cls_seg(self, feat):
feat = feat / feat.norm(dim=1, keepdim=True)
output = F.conv2d(feat, self.text_embeddings[:, :, None, None])
return output
def refine_output(self, output, k):
if self.pd_thresh > 0:
N, C, H, W = output.shape
_output = F.softmax(output * 100, dim=1)
max_cls_conf = _output.view(N, C, -1).max(dim=-1)[0]
selected_cls = (max_cls_conf < self.pd_thresh)[:, :, None, None].expand(N, C, H, W)
output[selected_cls] = -100
if k is not None and self.ks_thresh > 0:
output = F.softmax(output * 100, dim=1)
N, C, H, W = output.shape
output = output.view(N, C, -1).transpose(-2, -1)
# softmax
# weight = k @ k.transpose(-2, -1)
# weight = F.softmax(weight, dim=-1)
# L2 distance
k = F.normalize(k, p=2)
weight = k @ k.transpose(-2, -1)
selected_pos = (output.max(dim=-1, keepdim=True)[0] < self.ks_thresh)
selected_pos = selected_pos.expand(-1, -1, C)
weighted_output = weight @ output
output[selected_pos] = weighted_output[selected_pos]
output = output.transpose(-2, -1).view(N, C, H, W)
return output
class AdaptivePadding(nn.Module):
"""Applies padding to input (if needed) so that input can get fully covered
by filter you specified. It support two modes "same" and "corner". The
"same" mode is same with "SAME" padding mode in TensorFlow, pad zero around
input. The "corner" mode would pad zero to bottom right.
Args:
kernel_size (int | tuple): Size of the kernel:
stride (int | tuple): Stride of the filter. Default: 1:
dilation (int | tuple): Spacing between kernel elements.
Default: 1.
padding (str): Support "same" and "corner", "corner" mode
would pad zero to bottom right, and "same" mode would
pad zero around input. Default: "corner".
Example:
>>> kernel_size = 16
>>> stride = 16
>>> dilation = 1
>>> input = torch.rand(1, 1, 15, 17)
>>> adap_pad = AdaptivePadding(
>>> kernel_size=kernel_size,
>>> stride=stride,
>>> dilation=dilation,
>>> padding="corner")
>>> out = adap_pad(input)
>>> assert (out.shape[2], out.shape[3]) == (16, 32)
>>> input = torch.rand(1, 1, 16, 17)
>>> out = adap_pad(input)
>>> assert (out.shape[2], out.shape[3]) == (16, 32)
"""
def __init__(self, kernel_size=1, stride=1, dilation=1, padding='corner'):
super(AdaptivePadding, self).__init__()
assert padding in ('same', 'corner')
kernel_size = to_2tuple(kernel_size)
stride = to_2tuple(stride)
dilation = to_2tuple(dilation)
self.padding = padding
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
def get_pad_shape(self, input_shape):
input_h, input_w = input_shape
kernel_h, kernel_w = self.kernel_size
stride_h, stride_w = self.stride
output_h = math.ceil(input_h / stride_h)
output_w = math.ceil(input_w / stride_w)
pad_h = max((output_h - 1) * stride_h +
(kernel_h - 1) * self.dilation[0] + 1 - input_h, 0)
pad_w = max((output_w - 1) * stride_w +
(kernel_w - 1) * self.dilation[1] + 1 - input_w, 0)
return pad_h, pad_w
def forward(self, x):
pad_h, pad_w = self.get_pad_shape(x.size()[-2:])
if pad_h > 0 or pad_w > 0:
if self.padding == 'corner':
x = F.pad(x, [0, pad_w, 0, pad_h])
elif self.padding == 'same':
x = F.pad(x, [
pad_w // 2, pad_w - pad_w // 2, pad_h // 2,
pad_h - pad_h // 2
])
return x
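# Worked example for AdaptivePadding.get_pad_shape with kernel_size=16, stride=16,
# dilation=1 and an input of 15 x 17 (matching the docstring above):
#   output_h = ceil(15 / 16) = 1  ->  pad_h = max((1 - 1) * 16 + 15 + 1 - 15, 0) = 1,  so 15 -> 16
#   output_w = ceil(17 / 16) = 2  ->  pad_w = max((2 - 1) * 16 + 15 + 1 - 17, 0) = 15, so 17 -> 32
# The padded input becomes 16 x 32, which is what the docstring example asserts.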
class PatchEmbed(nn.Module):
"""Image to Patch Embedding.
We use a conv layer to implement PatchEmbed.
Args:
in_channels (int): The num of input channels. Default: 3
embed_dims (int): The dimensions of embedding. Default: 768
conv_type (str): The config dict for embedding
conv layer type selection. Default: "Conv2d".
kernel_size (int): The kernel_size of embedding conv. Default: 16.
stride (int, optional): The slide stride of embedding conv.
Default: None (Would be set as `kernel_size`).
padding (int | tuple | str): The padding length of
embedding conv. When it is a string, it means the mode
of adaptive padding; "same" and "corner" are supported.
Default: "corner".
dilation (int): The dilation rate of embedding conv. Default: 1.
bias (bool): Bias of embed conv. Default: True.
norm_cfg (dict, optional): Config dict for normalization layer.
Default: None.
input_size (int | tuple | None): The size of input, which will be
used to calculate the out size. Only works when `dynamic_size`
is False. Default: None.
init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization.
Default: None.
"""
def __init__(self,
in_channels=3,
embed_dims=768,
conv_type='Conv2d',
kernel_size=16,
stride=None,
padding='corner',
dilation=1,
bias=True,
norm_cfg=None,
input_size=None,
init_cfg=None):
super(PatchEmbed, self).__init__()
self.embed_dims = embed_dims
if stride is None:
stride = kernel_size
kernel_size = to_2tuple(kernel_size)
stride = to_2tuple(stride)
dilation = to_2tuple(dilation)
if isinstance(padding, str):
self.adap_padding = AdaptivePadding(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding)
# disable the padding of conv
padding = 0
else:
self.adap_padding = None
padding = to_2tuple(padding)
self.projection = build_conv_layer(
dict(type=conv_type),
in_channels=in_channels,
out_channels=embed_dims,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
if norm_cfg is not None:
self.norm = build_norm_layer(norm_cfg, embed_dims)[1]
else:
self.norm = None
if input_size:
input_size = to_2tuple(input_size)
# `init_out_size` would be used outside to
# calculate the num_patches
# when `use_abs_pos_embed` outside
self.init_input_size = input_size
if self.adap_padding:
pad_h, pad_w = self.adap_padding.get_pad_shape(input_size)
input_h, input_w = input_size
input_h = input_h + pad_h
input_w = input_w + pad_w
input_size = (input_h, input_w)
# https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html
h_out = (input_size[0] + 2 * padding[0] - dilation[0] *
(kernel_size[0] - 1) - 1) // stride[0] + 1
w_out = (input_size[1] + 2 * padding[1] - dilation[1] *
(kernel_size[1] - 1) - 1) // stride[1] + 1
self.init_out_size = (h_out, w_out)
else:
self.init_input_size = None
self.init_out_size = None
def forward(self, x):
"""
Args:
x (Tensor): Has shape (B, C, H, W). In most case, C is 3.
Returns:
tuple: Contains merged results and its spatial shape.
- x (Tensor): Has shape (B, out_h * out_w, embed_dims)
- out_size (tuple[int]): Spatial shape of x, arrange as
(out_h, out_w).
"""
if self.adap_padding:
x = self.adap_padding(x)
x = self.projection(x)
out_size = (x.shape[2], x.shape[3])
x = x.flatten(2).transpose(1, 2)
if self.norm is not None:
x = self.norm(x)
return x, out_size
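# Shape sketch for PatchEmbed with the defaults used below (kernel_size=16, stride=16,
# embed_dims=768): an input of (B, 3, 224, 224) needs no adaptive padding and is
# projected to x of shape (B, 196, 768) with out_size == (14, 14); inputs whose sides
# are not multiples of 16 are first zero-padded by AdaptivePadding.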
class TransformerEncoderLayer(nn.Module):
"""Implements one encoder layer in Vision Transformer.
Args:
embed_dims (int): The feature dimension.
num_heads (int): Parallel attention heads.
feedforward_channels (int): The hidden dimension for FFNs.
drop_rate (float): Probability of an element to be zeroed
after the feed forward layer. Default: 0.0.
attn_drop_rate (float): The drop out rate for attention layer.
Default: 0.0.
drop_path_rate (float): stochastic depth rate. Default 0.0.
num_fcs (int): The number of fully-connected layers for FFNs.
Default: 2.
qkv_bias (bool): enable bias for qkv if True. Default: True
act_cfg (dict): The activation config for FFNs.
Default: dict(type='GELU').
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='LN').
batch_first (bool): Key, Query and Value are shape of
(batch, n, embed_dim)
or (n, batch, embed_dim). Default: True.
"""
def __init__(self,
embed_dims,
num_heads,
feedforward_channels,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
num_fcs=2,
qkv_bias=True,
act_cfg=dict(type='GELU'),
norm_cfg=dict(type='LN'),
batch_first=True):
super(TransformerEncoderLayer, self).__init__()
self.norm1_name, norm1 = build_norm_layer(
norm_cfg, embed_dims, postfix=1)
self.add_module(self.norm1_name, norm1)
self.attn = MultiheadAttention(
embed_dims=embed_dims,
num_heads=num_heads,
attn_drop=attn_drop_rate,
proj_drop=drop_rate,
dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),
batch_first=batch_first,
bias=qkv_bias)
self.norm2_name, norm2 = build_norm_layer(
norm_cfg, embed_dims, postfix=2)
self.add_module(self.norm2_name, norm2)
self.ffn = FFN(
embed_dims=embed_dims,
feedforward_channels=feedforward_channels,
num_fcs=num_fcs,
ffn_drop=drop_rate,
dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),
act_cfg=act_cfg)
@property
def norm1(self):
return getattr(self, self.norm1_name)
@property
def norm2(self):
return getattr(self, self.norm2_name)
def forward(self, x, return_qkv=False):
q, k, v = None, None, None
if return_qkv:
y = self.norm1(x)
y = F.linear(y, self.attn.attn.in_proj_weight, self.attn.attn.in_proj_bias)
N, L, C = y.shape
y = y.view(N, L, 3, C//3).permute(2, 0, 1, 3).reshape(3*N, L, C//3)
y = F.linear(y, self.attn.attn.out_proj.weight, self.attn.attn.out_proj.bias)
# q, k, v = y.tensor_split(3, dim=0)
nn = y.shape[0]
q, k, v = y[:nn//3, :, :], y[nn//3:(nn//3) * 2, :, :], y[(nn//3) * 2:, :, :]
v += x
v = self.ffn(self.norm2(v), identity=v)
x = self.attn(self.norm1(x), identity=x)
x = self.ffn(self.norm2(x), identity=x)
return x, q, k, v
# @BACKBONES.register_module()
class VisionTransformer(nn.Module):
"""Vision Transformer.
This backbone is the implementation of `An Image is Worth 16x16 Words:
Transformers for Image Recognition at
Scale <https://arxiv.org/abs/2010.11929>`_.
Args:
img_size (int | tuple): Input image size. Default: 224.
patch_size (int): The patch size. Default: 16.
in_channels (int): Number of input channels. Default: 3.
embed_dims (int): embedding dimension. Default: 768.
num_layers (int): depth of transformer. Default: 12.
num_heads (int): number of attention heads. Default: 12.
mlp_ratio (int): ratio of mlp hidden dim to embedding dim.
Default: 4.
out_indices (list | tuple | int): Output from which stages.
Default: -1.
qkv_bias (bool): enable bias for qkv if True. Default: True.
drop_rate (float): Probability of an element to be zeroed.
Default 0.0
attn_drop_rate (float): The drop out rate for attention layer.
Default 0.0
drop_path_rate (float): stochastic depth rate. Default 0.0
with_cls_token (bool): Whether concatenating class token into image
tokens as transformer input. Default: True.
output_cls_token (bool): Whether output the cls_token. If set True,
`with_cls_token` must be True. Default: False.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='LN')
act_cfg (dict): The activation config for FFNs.
Default: dict(type='GELU').
patch_norm (bool): Whether to add a norm in PatchEmbed Block.
Default: False.
final_norm (bool): Whether to add an additional layer to normalize
the final feature map. Default: False.
interpolate_mode (str): Select the interpolate mode for position
embedding vector resize. Default: bicubic.
num_fcs (int): The number of fully-connected layers for FFNs.
Default: 2.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save
some memory while slowing down the training speed. Default: False.
pretrained (str, optional): model pretrained path. Default: None.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None.
"""
def __init__(self,
img_size=(224, 224),
patch_size=16,
patch_bias=False,
in_channels=3,
embed_dims=768,
num_layers=12,
num_heads=12,
mlp_ratio=4,
out_indices=-1,
qkv_bias=True,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
with_cls_token=True,
output_cls_token=False,
norm_cfg=dict(type='LN', eps=1e-6),
act_cfg=dict(type='GELU'),
patch_norm=False,
pre_norm=True,
final_norm=True,
return_qkv=True,
skip_last_attn=False,
interpolate_mode='bicubic',
num_fcs=2,
norm_eval=False,
with_cp=False,
pretrained=None,
init_cfg=None):
super(VisionTransformer, self).__init__()
if isinstance(img_size, int):
img_size = to_2tuple(img_size)
elif isinstance(img_size, tuple):
if len(img_size) == 1:
img_size = to_2tuple(img_size[0])
assert len(img_size) == 2, \
f'The size of image should have length 1 or 2, ' \
f'but got {len(img_size)}'
if output_cls_token:
assert with_cls_token is True, 'with_cls_token must be True if ' \
f'output_cls_token is set to True, but got {with_cls_token}'
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be set at the same time'
if isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is not None:
raise TypeError('pretrained must be a str or None')
self.img_size = img_size
self.patch_size = patch_size
self.interpolate_mode = interpolate_mode
self.norm_eval = norm_eval
self.with_cp = with_cp
self.pretrained = pretrained
self.patch_embed = PatchEmbed(
in_channels=in_channels,
embed_dims=embed_dims,
conv_type='Conv2d',
kernel_size=patch_size,
stride=patch_size,
padding='corner',
bias=patch_bias,
norm_cfg=norm_cfg if patch_norm else None,
init_cfg=None,
)
num_patches = (img_size[0] // patch_size) * \
(img_size[1] // patch_size)
self.with_cls_token = with_cls_token
self.output_cls_token = output_cls_token
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dims))
self.pos_embed = nn.Parameter(
torch.zeros(1, num_patches + 1, embed_dims))
self.drop_after_pos = nn.Dropout(p=drop_rate)
if isinstance(out_indices, int):
if out_indices == -1:
out_indices = num_layers - 1
self.out_indices = [out_indices]
elif isinstance(out_indices, list) or isinstance(out_indices, tuple):
self.out_indices = out_indices
else:
raise TypeError('out_indices must be type of int, list or tuple')
dpr = [
x.item() for x in torch.linspace(0, drop_path_rate, num_layers)
] # stochastic depth decay rule
self.layers = ModuleList()
for i in range(num_layers):
self.layers.append(
TransformerEncoderLayer(
embed_dims=embed_dims,
num_heads=num_heads,
feedforward_channels=mlp_ratio * embed_dims,
attn_drop_rate=attn_drop_rate,
drop_rate=drop_rate,
drop_path_rate=dpr[i],
num_fcs=num_fcs,
qkv_bias=qkv_bias,
act_cfg=act_cfg,
norm_cfg=norm_cfg,
batch_first=True))
self.pre_norm = pre_norm
if pre_norm:
self.norm0_name, norm0 = build_norm_layer(
norm_cfg, embed_dims, postfix=0)
self.add_module(self.norm0_name, norm0)
self.final_norm = final_norm
if final_norm:
self.norm1_name, norm1 = build_norm_layer(
norm_cfg, embed_dims, postfix=1)
self.add_module(self.norm1_name, norm1)
self.return_qkv = [False] * num_layers
if isinstance(return_qkv, bool):
for out_i in self.out_indices:
self.return_qkv[out_i] = return_qkv
elif isinstance(return_qkv, list) or isinstance(return_qkv, tuple):
for i, out_i in enumerate(self.out_indices):
self.return_qkv[out_i] = return_qkv[i]
else:
raise TypeError('return_qkv must be type of bool, list or tuple')
self.skip_last_attn = skip_last_attn
@property
def norm0(self):
return getattr(self, self.norm0_name)
@property
def norm1(self):
return getattr(self, self.norm1_name)
def _pos_embeding(self, patched_img, hw_shape, pos_embed):
"""Positiong embeding method.
Resize the pos_embed, if the input image size doesn't match
the training size.
Args:
patched_img (torch.Tensor): The patched image, it should be
shape of [B, L1, C].
hw_shape (tuple): The downsampled image resolution.
pos_embed (torch.Tensor): The pos_embed weighs, it should be
shape of [B, L2, c].
Return:
torch.Tensor: The pos encoded image feature.
"""
assert patched_img.ndim == 3 and pos_embed.ndim == 3, \
'the shapes of patched_img and pos_embed must be [B, L, C]'
x_len, pos_len = patched_img.shape[1], pos_embed.shape[1]
if x_len != pos_len:
if pos_len == (self.img_size[0] // self.patch_size) * (
self.img_size[1] // self.patch_size) + 1:
pos_h = self.img_size[0] // self.patch_size
pos_w = self.img_size[1] // self.patch_size
else:
raise ValueError(
'Unexpected shape of pos_embed, got {}.'.format(
pos_embed.shape))
pos_embed = self.resize_pos_embed(pos_embed, hw_shape,
(pos_h, pos_w),
self.interpolate_mode)
return self.drop_after_pos(patched_img + pos_embed)
@staticmethod
def resize_pos_embed(pos_embed, input_shape, pos_shape, mode):
"""Resize pos_embed weights.
Resize pos_embed using bicubic interpolate method.
Args:
pos_embed (torch.Tensor): Position embedding weights.
input_shape (tuple): Tuple for (downsampled input image height,
downsampled input image width).
pos_shape (tuple): The resolution of downsampled origin training
image.
mode (str): Algorithm used for upsampling:
``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` |
``'trilinear'``. Default: ``'nearest'``
Return:
torch.Tensor: The resized pos_embed of shape [B, L_new, C]
"""
assert pos_embed.ndim == 3, 'shape of pos_embed must be [B, L, C]'
pos_h, pos_w = pos_shape
cls_token_weight = pos_embed[:, 0]
pos_embed_weight = pos_embed[:, (-1 * pos_h * pos_w):]
pos_embed_weight = pos_embed_weight.reshape(
1, pos_h, pos_w, pos_embed.shape[2]).permute(0, 3, 1, 2)
pos_embed_weight = resize(
pos_embed_weight, size=input_shape, align_corners=False, mode=mode)
cls_token_weight = cls_token_weight.unsqueeze(1)
pos_embed_weight = torch.flatten(pos_embed_weight, 2).transpose(1, 2)
pos_embed = torch.cat((cls_token_weight, pos_embed_weight), dim=1)
return pos_embed
def forward(self, inputs):
B = inputs.shape[0]
x, hw_shape = self.patch_embed(inputs)
# stole cls_tokens impl from Phil Wang, thanks
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
x = self._pos_embeding(x, hw_shape, self.pos_embed)
if not self.with_cls_token:
# Remove class token for transformer encoder input
x = x[:, 1:]
if self.pre_norm:
x = self.norm0(x)
outs = []
for i, layer in enumerate(self.layers):
x, q, k, v = layer(x, self.return_qkv[i] \
or (i==len(self.layers)-1 and self.skip_last_attn))
if i == len(self.layers) - 1:
if self.final_norm:
x = self.norm1(x)
if self.return_qkv[i]:
v = self.norm1(v)
if self.skip_last_attn:
if self.with_cls_token:
x[:, 1:] = v[:, 1:]
else:
x = v
if i in self.out_indices:
if self.with_cls_token:
# Remove class token and reshape token for decoder head
out = x[:, 1:]
else:
out = x
B, _, C = out.shape
out = out.reshape(B, hw_shape[0], hw_shape[1],
C).permute(0, 3, 1, 2).contiguous()
if self.output_cls_token:
out = [out, x[:, 0]]
if self.return_qkv[i]:
if self.with_cls_token:
q = q[:, 1:]
k = k[:, 1:]
v = v[:, 1:]
v = v.reshape(B, hw_shape[0], hw_shape[1],
C).permute(0, 3, 1, 2).contiguous()
out = [out, q, k, v]
outs.append(out)
return tuple(outs)
class maskClipFeatureExtractor(nn.Module):
"""Encoder Decoder segmentors.
EncoderDecoder typically consists of backbone, decode_head, auxiliary_head.
Note that auxiliary_head is only used for deep supervision during training,
which could be dumped during inference.
"""
def __init__(self,
config,
test_cfg=dict(mode='whole'),
img_size=(224, 416),
preprocessing=None):
super(maskClipFeatureExtractor, self).__init__()
self.encoder = VisionTransformer()
self.decoder = MaskClipHead(text_embeddings_path=config['text_embeddings_path'],
visual_projs_path=config['visual_projs_path'],
text_categories=config['text_categories'])
self.align_corners = self.decoder.align_corners
self.num_classes = self.decoder.num_classes
self.test_cfg = test_cfg
self.checkpoint = config['maskclip_checkpoint']
self.encoder = load_checkpoint1(self.checkpoint, self.encoder)
self.img_size = img_size
for param in self.encoder.parameters():
param.requires_grad = False
for param in self.decoder.parameters():
param.requires_grad = False
# @auto_fp16(apply_to=('img', ))
# def forward(self, img, img_metas, return_loss=True, **kwargs):
def forward(self, img):
x = self.encoder(img)
feat, out = self.decoder(x)
feat = resize(
input=feat,
size=self.img_size,
mode='bilinear',
align_corners=True)
feat = F.normalize(feat, p=2, dim=1)
out = resize(
input=out,
size=self.img_size,
mode='bilinear',
align_corners=self.align_corners)
seg_pred = out.argmax(dim=1)
return feat, seg_pred
def show_result(self,
img,
result,
palette=None,
classes=None,
win_name='',
show=False,
wait_time=0,
out_file=None,
opacity=0.5,
gt=None):
"""Draw `result` over `img`.
Args:
img (str or Tensor): The image to be displayed.
result (Tensor): The semantic segmentation results to draw over
`img`.
palette (list[list[int]] | np.ndarray | None): The palette of
segmentation map. If None is given, random palette will be
generated. Default: None
win_name (str): The window name.
wait_time (int): Value of waitKey param.
Default: 0.
show (bool): Whether to show the image.
Default: False.
out_file (str or None): The filename to write the image.
Default: None.
opacity(float): Opacity of painted segmentation map.
Default 0.5.
Must be in (0, 1] range.
Returns:
img (ndarray): The drawn image; only returned if `show` is False and `out_file` is not specified.
"""
img = mmcv.imread(img)
img = img.copy()
seg = result[0]
if classes is not None:
self.CLASSES = classes
if palette is None:
if self.PALETTE is None:
# Get random state before set seed,
# and restore random state later.
# It will prevent loss of randomness, as the palette
# may be different in each iteration if not specified.
# See: https://github.com/open-mmlab/mmdetection/issues/5844
state = np.random.get_state()
np.random.seed(42)
# random palette
palette = np.random.randint(
0, 255, size=(len(self.CLASSES), 3))
np.random.set_state(state)
else:
palette = self.PALETTE
palette = np.array(palette)
assert palette.shape[0] == len(self.CLASSES), '({}) vs. ({})'.format(palette.shape[0], len(self.CLASSES))
assert palette.shape[1] == 3
assert len(palette.shape) == 2
assert 0 < opacity <= 1.0
color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
for label, color in enumerate(palette):
color_seg[seg == label, :] = color
# convert to BGR
color_seg = color_seg[..., ::-1]
img = img * (1 - opacity) + color_seg * opacity
if gt is not None:
# set the ignored area to black
img[gt == 255, :] = np.array([0, 0, 0])
img = img.astype(np.uint8)
# if out_file specified, do not show image in window
if out_file is not None:
show = False
if show:
mmcv.imshow(img, win_name, wait_time)
if out_file is not None:
mmcv.imwrite(img, out_file)
if not (show or out_file):
warnings.warn('show==False and out_file is not specified, only '
'result image will be returned')
return img
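if __name__ == "__main__":
    # Minimal smoke test, assuming mmcv/mmseg are installed and no pretrained weights are
    # needed just to trace shapes (the checkpoint/config paths used by
    # maskClipFeatureExtractor are site-specific and therefore not exercised here).
    _encoder = VisionTransformer()
    _outs = _encoder(torch.randn(1, 3, 224, 224))
    _x, _q, _k, _v = _outs[-1]
    # Expected shapes: spatial map (1, 768, 14, 14), q/k tokens (1, 196, 768), v map (1, 768, 14, 14).
    print(_x.shape, _q.shape, _k.shape, _v.shape)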
| 37,830 | 37.603061 | 119 |
py
|
CLIP2Scene
|
CLIP2Scene-main/model/modules/resnet_encoder.py
|
import torch.nn as nn
from torchvision.models.resnet import ResNet
from torchvision.models.resnet import BasicBlock
from torchvision.models.resnet import Bottleneck
class ResNetEncoder(ResNet):
def __init__(self, **kwargs):
super().__init__(**kwargs)
del self.fc
del self.avgpool
def get_stages(self):
return [
nn.Identity(),
nn.Sequential(self.conv1, self.bn1, self.relu),
nn.Sequential(self.maxpool, self.layer1),
self.layer2,
self.layer3,
self.layer4,
]
def forward(self, x):
stages = self.get_stages()
features = []
for i in range(6):
x = stages[i](x)
features.append(x)
return features[5]
def load_state_dict(self, state_dict, **kwargs):
state_dict.pop("fc.bias", None)
state_dict.pop("fc.weight", None)
super().load_state_dict(state_dict, **kwargs)
resnet_encoders = {
"resnet18": {
"encoder": ResNetEncoder,
"params": {
"block": BasicBlock,
"layers": [2, 2, 2, 2],
},
},
"resnet50": {
"encoder": ResNetEncoder,
"params": {
"block": Bottleneck,
"layers": [3, 4, 6, 3],
},
},
}
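if __name__ == "__main__":
    # Minimal sketch: build the resnet18 variant from the registry above and check that
    # the encoder returns only the deepest feature map (stride 32).
    import torch
    cfg = resnet_encoders["resnet18"]
    encoder = cfg["encoder"](**cfg["params"])
    feat = encoder(torch.randn(2, 3, 224, 224))
    print(feat.shape)  # expected: torch.Size([2, 512, 7, 7])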
| 1,314 | 22.070175 | 59 |
py
|
CLIP2Scene
|
CLIP2Scene-main/model/modules/common.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections.abc
from enum import Enum
import MinkowskiEngine as ME
class NormType(Enum):
BATCH_NORM = 0
SPARSE_LAYER_NORM = 1
SPARSE_INSTANCE_NORM = 2
SPARSE_SWITCH_NORM = 3
def get_norm(norm_type, n_channels, D, bn_momentum=0.1):
if norm_type == NormType.BATCH_NORM:
return ME.MinkowskiBatchNorm(n_channels, momentum=bn_momentum)
elif norm_type == NormType.SPARSE_INSTANCE_NORM:
return ME.MinkowskiInstanceNorm(n_channels, D=D)
else:
raise ValueError(f"Norm type: {norm_type} not supported")
class ConvType(Enum):
"""
Define the kernel region type
"""
HYPERCUBE = 0, "HYPERCUBE"
SPATIAL_HYPERCUBE = 1, "SPATIAL_HYPERCUBE"
SPATIO_TEMPORAL_HYPERCUBE = 2, "SPATIO_TEMPORAL_HYPERCUBE"
HYPERCROSS = 3, "HYPERCROSS"
SPATIAL_HYPERCROSS = 4, "SPATIAL_HYPERCROSS"
SPATIO_TEMPORAL_HYPERCROSS = 5, "SPATIO_TEMPORAL_HYPERCROSS"
SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS = 6, "SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS "
def __new__(cls, value, name):
member = object.__new__(cls)
member._value_ = value
member.fullname = name
return member
def __int__(self):
return self.value
# Convert the ConvType var to a RegionType var
conv_to_region_type = {
# kernel_size = [k, k, k, 1]
ConvType.HYPERCUBE: ME.RegionType.HYPER_CUBE,
ConvType.SPATIAL_HYPERCUBE: ME.RegionType.HYPER_CUBE,
ConvType.SPATIO_TEMPORAL_HYPERCUBE: ME.RegionType.HYPER_CUBE,
ConvType.HYPERCROSS: ME.RegionType.HYPER_CROSS,
ConvType.SPATIAL_HYPERCROSS: ME.RegionType.HYPER_CROSS,
ConvType.SPATIO_TEMPORAL_HYPERCROSS: ME.RegionType.HYPER_CROSS,
ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS: ME.RegionType.HYPER_CROSS,
}
int_to_region_type = {i: m[0] for i, m in enumerate(ME.RegionType.__entries.values())}
def convert_conv_type(conv_type, kernel_size, D):
assert isinstance(conv_type, ConvType), "conv_type must be of ConvType"
region_type = conv_to_region_type[conv_type]
axis_types = None
if conv_type == ConvType.SPATIAL_HYPERCUBE:
# No temporal convolution
if isinstance(kernel_size, collections.abc.Sequence):
kernel_size = kernel_size[:3]
else:
kernel_size = [
kernel_size,
] * 3
if D == 4:
kernel_size.append(1)
elif conv_type == ConvType.SPATIO_TEMPORAL_HYPERCUBE:
# conv_type conversion already handled
assert D == 4
elif conv_type == ConvType.HYPERCUBE:
# conv_type conversion already handled
pass
elif conv_type == ConvType.SPATIAL_HYPERCROSS:
if isinstance(kernel_size, collections.abc.Sequence):
kernel_size = kernel_size[:3]
else:
kernel_size = [
kernel_size,
] * 3
if D == 4:
kernel_size.append(1)
elif conv_type == ConvType.HYPERCROSS:
# conv_type conversion already handled
pass
elif conv_type == ConvType.SPATIO_TEMPORAL_HYPERCROSS:
# conv_type conversion already handled
assert D == 4
elif conv_type == ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS:
# Define the CUBIC conv kernel for spatial dims and CROSS conv for temp dim
axis_types = [
ME.RegionType.HYPER_CUBE,
] * 3
if D == 4:
axis_types.append(ME.RegionType.HYPER_CROSS)
return region_type, axis_types, kernel_size
def conv(
in_planes,
out_planes,
kernel_size,
stride=1,
dilation=1,
bias=False,
conv_type=ConvType.HYPERCUBE,
D=-1,
):
assert D > 0, "Dimension must be a positive integer"
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size,
stride,
dilation,
region_type=region_type,
axis_types=axis_types,
dimension=D,
)
return ME.MinkowskiConvolution(
in_channels=in_planes,
out_channels=out_planes,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
bias=bias,
kernel_generator=kernel_generator,
dimension=D,
)
def conv_tr(
in_planes,
out_planes,
kernel_size,
upsample_stride=1,
dilation=1,
bias=False,
conv_type=ConvType.HYPERCUBE,
D=-1,
):
assert D > 0, "Dimension must be a positive integer"
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size,
upsample_stride,
dilation,
region_type=region_type,
axis_types=axis_types,
dimension=D,
)
return ME.MinkowskiConvolutionTranspose(
in_channels=in_planes,
out_channels=out_planes,
kernel_size=kernel_size,
stride=upsample_stride,
dilation=dilation,
bias=bias,
kernel_generator=kernel_generator,
dimension=D,
)
def sum_pool(kernel_size, stride=1, dilation=1, conv_type=ConvType.HYPERCUBE, D=-1):
assert D > 0, "Dimension must be a positive integer"
region_type, axis_types, kernel_size = convert_conv_type(conv_type, kernel_size, D)
kernel_generator = ME.KernelGenerator(
kernel_size,
stride,
dilation,
region_type=region_type,
axis_types=axis_types,
dimension=D,
)
return ME.MinkowskiSumPooling(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
kernel_generator=kernel_generator,
dimension=D,
)
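if __name__ == "__main__":
    # Minimal sketch (assumes MinkowskiEngine is installed): build a 3x3x3 hypercube
    # sparse convolution in 3-D, the same primitive the ResNet blocks in this repo use.
    layer = conv(in_planes=3, out_planes=16, kernel_size=3, stride=1,
                 conv_type=ConvType.HYPERCUBE, D=3)
    norm = get_norm(NormType.BATCH_NORM, 16, D=3)
    print(layer, norm)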
| 5,815 | 28.673469 | 87 |
py
|
CLIP2Scene
|
CLIP2Scene-main/model/modules/__init__.py
| 0 | 0 | 0 |
py
|
|
CLIP2Scene
|
CLIP2Scene-main/model/modules/resnet_block.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from model.modules.common import ConvType, NormType, get_norm, conv
from MinkowskiEngine import MinkowskiReLU
class BasicBlockBase(nn.Module):
expansion = 1
NORM_TYPE = NormType.BATCH_NORM
def __init__(
self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
conv_type=ConvType.HYPERCUBE,
bn_momentum=0.1,
D=3,
):
super(BasicBlockBase, self).__init__()
self.conv1 = conv(
inplanes,
planes,
kernel_size=3,
stride=stride,
dilation=dilation,
conv_type=conv_type,
D=D,
)
self.norm1 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
self.conv2 = conv(
planes,
planes,
kernel_size=3,
stride=1,
dilation=dilation,
bias=False,
conv_type=conv_type,
D=D,
)
self.norm2 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
self.relu = MinkowskiReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class BasicBlock(BasicBlockBase):
NORM_TYPE = NormType.BATCH_NORM
class BottleneckBase(nn.Module):
expansion = 4
NORM_TYPE = NormType.BATCH_NORM
def __init__(
self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
conv_type=ConvType.HYPERCUBE,
bn_momentum=0.1,
D=3,
):
super(BottleneckBase, self).__init__()
self.conv1 = conv(inplanes, planes, kernel_size=1, D=D)
self.norm1 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
self.conv2 = conv(
planes,
planes,
kernel_size=3,
stride=stride,
dilation=dilation,
conv_type=conv_type,
D=D,
)
self.norm2 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
self.conv3 = conv(planes, planes * self.expansion, kernel_size=1, D=D)
self.norm3 = get_norm(
self.NORM_TYPE, planes * self.expansion, D, bn_momentum=bn_momentum
)
self.relu = MinkowskiReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.norm3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(BottleneckBase):
NORM_TYPE = NormType.BATCH_NORM
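if __name__ == "__main__":
    # Minimal sketch: instantiate one sparse BasicBlock (no downsampling branch) just to
    # show the expected constructor arguments; assumes MinkowskiEngine is installed.
    block = BasicBlock(inplanes=32, planes=32, stride=1, dilation=1, D=3)
    print(block)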
| 3,375 | 23.114286 | 81 |
py
|
CLIP2Scene
|
CLIP2Scene-main/model/modules/dino/vision_transformer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Mostly copy-paste from timm library.
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
import os
import math
import warnings
from functools import partial
import torch
import torch.nn as nn
def drop_path(x, drop_prob: float = 0., training: bool = False):
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
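# Worked example: with drop_prob = 0.1, each sample in the batch is zeroed with
# probability 0.1 and the surviving samples are rescaled by 1 / 0.9, so the expected
# value of the output matches the input (standard stochastic-depth behaviour).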
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x, attn
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x, return_attention=False):
y, attn = self.attn(self.norm1(x))
if return_attention:
return attn
x = x + self.drop_path(y)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
num_patches = (img_size // patch_size) * (img_size // patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
B, C, H, W = x.shape
x = self.proj(x).flatten(2).transpose(1, 2)
return x
class VisionTransformer(nn.Module):
""" Vision Transformer """
def __init__(self, img_size=[224], patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=nn.LayerNorm, **kwargs):
super().__init__()
self.num_features = self.embed_dim = embed_dim
self.patch_embed = PatchEmbed(
img_size=img_size[0], patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# Classifier head
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def interpolate_pos_encoding(self, x, w, h):
npatch = x.shape[1] - 1
N = self.pos_embed.shape[1] - 1
if npatch == N and w == h:
return self.pos_embed
class_pos_embed = self.pos_embed[:, 0]
patch_pos_embed = self.pos_embed[:, 1:]
dim = x.shape[-1]
w0 = w // self.patch_embed.patch_size
h0 = h // self.patch_embed.patch_size
# we add a small number to avoid floating point error in the interpolation
# see discussion at https://github.com/facebookresearch/dino/issues/8
w0, h0 = w0 + 0.1, h0 + 0.1
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
mode='bicubic',
)
assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
def prepare_tokens(self, x):
B, nc, w, h = x.shape
x = self.patch_embed(x) # patch linear embedding
# add the [CLS] token to the embed patch tokens
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
# add positional encoding to each token
x = x + self.interpolate_pos_encoding(x, w, h)
return self.pos_drop(x)
def forward(self, x, all=False):
x = self.prepare_tokens(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
if all:
return x
else:
return x[:, 0]
def get_last_selfattention(self, x):
x = self.prepare_tokens(x)
for i, blk in enumerate(self.blocks):
if i < len(self.blocks) - 1:
x = blk(x)
else:
# return attention of the last block
return blk(x, return_attention=True)
def get_intermediate_layers(self, x, n=1):
x = self.prepare_tokens(x)
# we return the output tokens from the `n` last blocks
output = []
for i, blk in enumerate(self.blocks):
x = blk(x)
if len(self.blocks) - i <= n:
output.append(self.norm(x))
return output
def vit_tiny(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
def vit_small(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
def vit_base(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
class DINOHead(nn.Module):
def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256):
super().__init__()
nlayers = max(nlayers, 1)
if nlayers == 1:
self.mlp = nn.Linear(in_dim, bottleneck_dim)
else:
layers = [nn.Linear(in_dim, hidden_dim)]
if use_bn:
layers.append(nn.BatchNorm1d(hidden_dim))
layers.append(nn.GELU())
for _ in range(nlayers - 2):
layers.append(nn.Linear(hidden_dim, hidden_dim))
if use_bn:
layers.append(nn.BatchNorm1d(hidden_dim))
layers.append(nn.GELU())
layers.append(nn.Linear(hidden_dim, bottleneck_dim))
self.mlp = nn.Sequential(*layers)
self.apply(self._init_weights)
self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False))
self.last_layer.weight_g.data.fill_(1)
if norm_last_layer:
self.last_layer.weight_g.requires_grad = False
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.mlp(x)
x = nn.functional.normalize(x, dim=-1, p=2)
x = self.last_layer(x)
return x
def load_pretrained_weights(model, pretrained_weights, checkpoint_key, model_name, patch_size):
if os.path.isfile(pretrained_weights):
state_dict = torch.load(pretrained_weights, map_location="cpu")
if checkpoint_key is not None and checkpoint_key in state_dict:
print(f"Take key {checkpoint_key} in provided checkpoint dict")
state_dict = state_dict[checkpoint_key]
# remove `module.` prefix
state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
# remove `backbone.` prefix induced by multicrop wrapper
state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items()}
msg = model.load_state_dict(state_dict, strict=False)
print('Pretrained weights found at {} and loaded with msg: {}'.format(pretrained_weights, msg))
else:
print("Please use the `--pretrained_weights` argument to indicate the path of the checkpoint to evaluate.")
url = None
if model_name == "vit_small" and patch_size == 16:
url = "dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth"
elif model_name == "vit_small" and patch_size == 8:
url = "dino_deitsmall8_pretrain/dino_deitsmall8_pretrain.pth"
elif model_name == "vit_base" and patch_size == 16:
url = "dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth"
elif model_name == "vit_base" and patch_size == 8:
url = "dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth"
elif model_name == "xcit_small_12_p16":
url = "dino_xcit_small_12_p16_pretrain/dino_xcit_small_12_p16_pretrain.pth"
elif model_name == "xcit_small_12_p8":
url = "dino_xcit_small_12_p8_pretrain/dino_xcit_small_12_p8_pretrain.pth"
elif model_name == "xcit_medium_24_p16":
url = "dino_xcit_medium_24_p16_pretrain/dino_xcit_medium_24_p16_pretrain.pth"
elif model_name == "xcit_medium_24_p8":
url = "dino_xcit_medium_24_p8_pretrain/dino_xcit_medium_24_p8_pretrain.pth"
elif model_name == "resnet50":
url = "dino_resnet50_pretrain/dino_resnet50_pretrain.pth"
if url is not None:
print("Since no pretrained weights have been provided, we load the reference pretrained DINO weights.")
state_dict = torch.hub.load_state_dict_from_url(url="https://dl.fbaipublicfiles.com/dino/" + url)
model.load_state_dict(state_dict, strict=True)
else:
print("There is no reference weights available for this model => We use random weights.")
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
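if __name__ == "__main__":
    # Minimal sketch: a randomly initialised ViT-S/16 producing the [CLS] embedding for a
    # dummy batch; load_pretrained_weights(...) could then pull in the reference DINO weights.
    model = vit_small(patch_size=16)
    cls_embedding = model(torch.randn(2, 3, 224, 224))
    print(cls_embedding.shape)  # expected: torch.Size([2, 384])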
| 15,379 | 40.013333 | 124 |
py
|
les-distance
|
les-distance-master/les/main_tori.py
|
import numpy as np
import matplotlib.pyplot as plt
from les import les_desc_comp, les_dist_comp
from comparisons import CompareIMD, CompareIMDOurApproach, CompareTDA, CompareGS, CompareGW
# Simulation parameters:
N = 1000 # Number of samples - reduced from N=3000 for faster computation times
ITER_NUM = 2 # Number of trials to average on
R1 = 10 # Major radius
R2 = 3 # Minor/middle radius in 2D/3D
R3 = 1 # Minor radius in 3D
NOISE_VAR = 0.01 # STD of added noise to the tori data
R_RATIOS = np.arange(0.4, 1.01, 0.2) # Radius ratio (c parameter)
DICT_KEYS = ['t2D_2DSc', 't2D_3D', 't2D_3DSc', 't3D_2DSc', 't3D_3DSc']
# LES hyperparameter
GAMMA = 1e-8 # Kernel regularization parameter
SIGMA = 2 # Kernel scale
NEV = 200 # Number of eigenvalues to estimate
# ========================== Comparisons: ==========================
# List of algorithms to compare with. Possible algorithms: 'imd_ours', 'imd', 'tda', 'gs', 'gw'
ALGS2COMPARE = ['imd_ours'] # ['imd_ours', 'imd', 'gs', 'gw', 'tda']
ALG_CLASS = {'imd': CompareIMD,
'imd_ours': CompareIMDOurApproach,
'tda': CompareTDA,
'gs': CompareGS,
'gw': CompareGW,
}
# Initialize the classes that compute the compared algorithms
algs_dists = {}
for alg in ALGS2COMPARE:
alg = alg.lower()
if alg == 'tda':
algs_dists['tda_H0'] = ALG_CLASS[alg](0, ITER_NUM, R_RATIOS, DICT_KEYS)
algs_dists['tda_H1'] = ALG_CLASS[alg](1, ITER_NUM, R_RATIOS, DICT_KEYS)
algs_dists['tda_H2'] = ALG_CLASS[alg](2, ITER_NUM, R_RATIOS, DICT_KEYS)
elif alg == 'imd_ours':
algs_dists[alg] = ALG_CLASS[alg](GAMMA, ITER_NUM, R_RATIOS, DICT_KEYS)
else:
algs_dists[alg] = ALG_CLASS[alg](ITER_NUM, R_RATIOS, DICT_KEYS)
# ========= Initializations and tori equation definitions =========
les_dist = {key: np.zeros((ITER_NUM, len(R_RATIOS))) for key in DICT_KEYS}
def tori_2d_gen(c):
ang1, ang2, ang3 = 2 * np.pi * np.random.rand(N), 2 * np.pi * np.random.rand(N), 2 * np.pi * np.random.rand(N)
tor2d = np.concatenate(([(R1 + c * R2 * np.cos(ang2)) * np.cos(ang1)],
[(R1 + c * R2 * np.cos(ang2)) * np.sin(ang1)],
[c * R2 * np.sin(ang2)]),
axis=0)
tor2d += NOISE_VAR * np.random.randn(3, N)
return tor2d
def tori_3d_gen(c):
ang1, ang2, ang3 = 2 * np.pi * np.random.rand(N), 2 * np.pi * np.random.rand(N), 2 * np.pi * np.random.rand(N)
tor3d = np.concatenate(([(R1 + (R2 + c * R3 * np.cos(ang3)) * np.cos(ang2)) * np.cos(ang1)],
[(R1 + (R2 + c * R3 * np.cos(ang3)) * np.cos(ang2)) * np.sin(ang1)],
[(R2 + c * R3 * np.cos(ang3)) * np.sin(ang2)],
[c * R3 * np.sin(ang3)]),
axis=0)
tor3d += NOISE_VAR * np.random.randn(4, N)
return tor3d
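# Shape note: tori_2d_gen returns an array of shape (3, N) and tori_3d_gen returns (4, N);
# the descriptor code below therefore transposes to [samples x features], e.g.
# les_desc_comp(tori_2d_gen(1).T, SIGMA, NEV, GAMMA) yields a descriptor of length NEV.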
for ite in range(ITER_NUM):
print(f'Running iteration number {ite}')
for i, r_ratio in enumerate(R_RATIOS):
print(f'Computing radius ratio c = {r_ratio:.1f}')
# -------------- Generate tori data --------------
data_2d_tor = tori_2d_gen(1)
data_2d_tor_sc = tori_2d_gen(r_ratio)
data_3d_tor = tori_3d_gen(1)
data_3d_tor_sc = tori_3d_gen(r_ratio)
# ---- Computing dataset descriptors and distances ----
print('Computing LES descriptors and distances')
les_desc_2d_tor = les_desc_comp(data_2d_tor.T, SIGMA, NEV, GAMMA)
les_desc_2d_tor_sc = les_desc_comp(data_2d_tor_sc.T, SIGMA, NEV, GAMMA)
les_desc_3d_tor = les_desc_comp(data_3d_tor.T, SIGMA, NEV, GAMMA)
les_desc_3d_tor_sc = les_desc_comp(data_3d_tor_sc.T, SIGMA, NEV, GAMMA)
les_dist['t2D_2DSc'][ite, i] = les_dist_comp(les_desc_2d_tor, les_desc_2d_tor_sc)
les_dist['t2D_3D'][ite, i] = les_dist_comp(les_desc_2d_tor, les_desc_3d_tor)
les_dist['t2D_3DSc'][ite, i] = les_dist_comp(les_desc_2d_tor, les_desc_3d_tor_sc)
les_dist['t3D_2DSc'][ite, i] = les_dist_comp(les_desc_3d_tor, les_desc_2d_tor_sc)
les_dist['t3D_3DSc'][ite, i] = les_dist_comp(les_desc_3d_tor, les_desc_3d_tor_sc)
for alg in algs_dists:
print('Computing ' + alg.upper() + ' descriptors')
if alg == 'imd_ours':
algs_dists[alg].comp_all_tori_dists(ite, i, les_desc_2d_tor, les_desc_2d_tor_sc, les_desc_3d_tor,
les_desc_3d_tor_sc)
else:
algs_dists[alg].comp_all_tori_dists(ite, i, data_2d_tor.T, data_2d_tor_sc.T, data_3d_tor.T,
data_3d_tor_sc.T)
# ========================== Plot display ==========================
plt.style.use('seaborn-paper')
line_width = 3
alpha_val = 0.2
def create_distance_plt(var, ylabel='', xlabel=''):
plt.plot(R_RATIOS, np.mean(var['t2D_2DSc'], axis=0), '-', color='teal', linewidth=line_width,
label="$d(T_{2},T_{2}^{Sc})$")
x, y, err = R_RATIOS, np.mean(var['t2D_2DSc'], axis=0), np.std(var['t2D_2DSc'], axis=0)
plt.fill_between(x, y - err, y + err, alpha=alpha_val, facecolor='teal', linewidth=0)
plt.plot(R_RATIOS, np.mean(var['t3D_3DSc'], axis=0), '-', color='indigo', linewidth=line_width,
label="$d(T_{3},T_{3}^{Sc})$")
y, err = np.mean(var['t3D_3DSc'], axis=0), np.std(var['t3D_3DSc'], axis=0)
plt.fill_between(x, y - err, y + err, alpha=alpha_val, facecolor='indigo', linewidth=0)
plt.plot(R_RATIOS, np.mean(var['t2D_3D'], axis=0), '--', color='yellowgreen', linewidth=line_width,
label="$d(T_{2},T_{3})$")
y, err = np.mean(var['t2D_3D'], axis=0), np.std(var['t2D_3D'], axis=0)
plt.fill_between(x, y - err, y + err, alpha=alpha_val, facecolor='yellowgreen', linewidth=0)
plt.plot(R_RATIOS, np.mean(var['t2D_3DSc'], axis=0), '--', color='plum', linewidth=line_width,
label="$d(T_{2},T_{3}^{Sc})$")
y, err = np.mean(var['t2D_3DSc'], axis=0), np.std(var['t2D_3DSc'], axis=0)
plt.fill_between(x, y - err, y + err, alpha=alpha_val, facecolor='plum', linewidth=0)
plt.plot(R_RATIOS, np.mean(var['t3D_2DSc'], axis=0), '--', color='tomato', linewidth=line_width,
label="$d(T_{3},T_{2}^{Sc})$")
y, err = np.mean(var['t3D_2DSc'], axis=0), np.std(var['t3D_2DSc'], axis=0)
plt.fill_between(x, y - err, y + err, alpha=alpha_val, facecolor='tomato', linewidth=0)
plt.ylim(bottom=0)
plt.xticks([0.4, 0.6, 0.8, 1], ['0.4', '0.6', '0.8', '1'])
plt.ylabel(ylabel, fontsize=16)
plt.xlabel(xlabel, fontsize=16)
anum = len(algs_dists) + 1
if anum <= 4:
sbplt_rc = [1, anum]
else:
sbplt_rc = [2, int(np.ceil(anum/2))]
fig = plt.figure(figsize=[10, 6])
plt.subplot(sbplt_rc[0], sbplt_rc[1], 1)
create_distance_plt(les_dist, ylabel='LES', xlabel='Radius Scale (c)')
for i, alg in enumerate(algs_dists):
plt.subplot(sbplt_rc[0], sbplt_rc[1], i+2)
create_distance_plt(algs_dists[alg].all_distances, ylabel=alg.upper(), xlabel='Radius Scale (c)')
# plt.legend(framealpha=1, frameon=True, handlelength=2.5)
fig.tight_layout()
legendid = plt.legend(framealpha=1, frameon=True, loc='upper right', bbox_to_anchor=(0.95, 2.4), fontsize=14, labelspacing=0.1, handlelength=2, ncol=5)
plt.savefig('Tori_comparisons.pdf')
plt.show()
| 7,358 | 46.173077 | 151 |
py
|
les-distance
|
les-distance-master/les/les.py
|
import numpy as np
import scipy.spatial as spat
import scipy.linalg as spla
from scipy.sparse.linalg import eigsh
APPROXEIG = True # Use [Tropp et al., 2017] approximation for the eigenvalues
M = 2 # Number of random vectors to use in [Tropp et al., 2017] (total = M x nev)
def _build_graph(data, sigma=2):
"""
Compute an approximation of the heat kernel based on diffusion maps
:param data: data samples organized as [samples x features]
:param sigma: kernel scale (multiplying the median of the distances)
:return h_op: a Discrete approximation of the heat kernel using diffusion maps
"""
# -------------- Distance matrix and kernel computation: -------------
dist_mat = np.square(spat.distance.squareform(spat.distance.pdist(data)))
dist_mat = np.exp(-dist_mat / (sigma * np.median(dist_mat)))
# ------------ Construction of the symmetric diffusion operator: ------------
h_op = dist_mat
d = 1 / np.sum(dist_mat, axis=1)
for i in range(h_op.shape[0]):
h_op[i, :] *= d[i]
h_op[:, i] *= d[i]
d2 = 1 / np.sqrt(np.sum(h_op, axis=1))
for i in range(h_op.shape[0]):
h_op[i, :] *= d2[i]
h_op[:, i] *= d2[i]
return h_op
def _compute_log_eigenvalues(h_op, nev=500, gamma=1e-6, tol=1e-8):
"""
Estimating the eigenvalues
:param h_op: discrete approximation of the heat kernel using diffusion maps (PSD matrix)
:param nev: number of eigenvalues to compute
:param gamma: kernel regularization parameter
:param tol: tolerance for eigenvalue computation if not using the approximation
:return log_eigvals: log of the estimated eigenvalues (shifted by the regularization parameter gamma)
:return eigvals: the raw estimated eigenvalues
"""
if not APPROXEIG:
eigvals = eigsh(h_op, k=nev, return_eigenvectors=False, tol=tol, sigma=1, which='LM')
else:
# Fixed rank PSD approximation algorithm [Tropp et al., 2017]
mu = 2.2 * 1e-16
n = h_op.shape[0]
omega = np.random.randn(n, M * nev)
omega = spla.orth(omega)
y = h_op @ omega
nu = mu * np.linalg.norm(y, ord=2)
y_nu = y + nu * omega
b_mat = omega.T @ y_nu
c_mat = np.linalg.cholesky((b_mat + b_mat.T) / 2).T
eigvals = spla.svdvals(y_nu @ np.linalg.inv(c_mat))
eigvals = np.maximum(np.square(eigvals) - nu, 0)
eigvals = np.sort(eigvals)[-nev:]
log_eigvals = np.log(eigvals + gamma)
return log_eigvals, eigvals
def les_desc_comp(data, sigma=2, nev=500, gamma=1e-6):
"""
Compute LES descriptors
:param data: data samples organized as [samples x features]
:param sigma: kernel scale for diffusion operator (multiplying the median of the distances)
:param nev: number of eigenvalues to compute
:param gamma: kernel regularization parameter
:return: les_desc: les descriptor [1 x nev] of data
"""
h_op = _build_graph(data, sigma)
les_desc, _ = _compute_log_eigenvalues(h_op, nev, gamma)
return les_desc
def les_dist_comp(les_desc1, les_desc2):
"""
Compute the LES distance
:param les_desc1: LES descriptor of dataset1
:param les_desc2: LES descriptor of dataset2
:return: les_dist: les distance between the two datasets
"""
return np.sqrt(np.sum((les_desc1 - les_desc2) ** 2))
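if __name__ == "__main__":
    # Minimal sketch: LES distance between a small point cloud sampled from a circle and a
    # slightly noisy copy of it. The numbers are illustrative only; nev is kept small for speed.
    rng = np.random.default_rng(0)
    ang = 2 * np.pi * rng.random(300)
    data1 = np.stack([np.cos(ang), np.sin(ang)], axis=1)
    data2 = data1 + 0.01 * rng.standard_normal(data1.shape)
    desc1 = les_desc_comp(data1, sigma=2, nev=50, gamma=1e-6)
    desc2 = les_desc_comp(data2, sigma=2, nev=50, gamma=1e-6)
    print(les_dist_comp(desc1, desc2))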
| 3,314 | 31.184466 | 95 |
py
|
les-distance
|
les-distance-master/les/comparisons.py
|
import abc
import numpy as np
import scipy.spatial as spat
class CompareBase:
def __init__(self, iter_num, r_ratios, dict_keys):
self.all_distances = {key: np.zeros((iter_num, len(r_ratios))) for key in dict_keys}
@abc.abstractmethod
def _comp_desc(self, data):
"""
Compute the algorithm's descriptors per dataset
:param data: dataset samples organized as [samples x features]
:return desc: descriptor vector for the dataset
"""
@abc.abstractmethod
def _comp_dist(self, desc1, desc2):
"""
Compute the algorithm's distance between a pair of dataset descriptors
:param desc1, desc2: descriptors of two datasets
:return dist: distance between the datasets based on the given algorithm
"""
def comp_all_tori_dists(self, ite, i, data_2d_tor, data_2d_tor_sc, data_3d_tor, data_3d_tor_sc):
"""
Compute the distances between all tori datasets
:param ite: realization number
:param i: scale index
:param data_2d_tor, data_2d_tor_sc, data_3d_tor, data_3d_tor_sc: tori datasets organized as [samples x features]
"""
desc_2d_tor = self._comp_desc(data_2d_tor)
desc_2d_tor_sc = self._comp_desc(data_2d_tor_sc)
desc_3d_tor = self._comp_desc(data_3d_tor)
desc_3d_tor_sc = self._comp_desc(data_3d_tor_sc)
self.all_distances['t2D_2DSc'][ite, i] = self._comp_dist(desc_2d_tor, desc_2d_tor_sc)
self.all_distances['t2D_3D'][ite, i] = self._comp_dist(desc_2d_tor, desc_3d_tor)
self.all_distances['t2D_3DSc'][ite, i] = self._comp_dist(desc_2d_tor, desc_3d_tor_sc)
self.all_distances['t3D_2DSc'][ite, i] = self._comp_dist(desc_3d_tor, desc_2d_tor_sc)
self.all_distances['t3D_3DSc'][ite, i] = self._comp_dist(desc_3d_tor, desc_3d_tor_sc)
class CompareIMD(CompareBase):
def __init__(self, *args):
super().__init__(*args)
imd = __import__('msid')
self.imd_descriptor = imd.msid.msid_descriptor
# IMD hyperparameters
self.T = np.logspace(-1, 1, 256) # Temperatures for heat kernel approx.
self.IMD_N_NBRS = 5 # Number of neighbors in graph Laplacian
self.M_LANCOZ = 10 # Number of Lanczos steps in SLQ
def _comp_desc(self, data):
desc = self.imd_descriptor(data, ts=self.T, k=self.IMD_N_NBRS, graph_builder='sparse', m=self.M_LANCOZ)
return desc
def _comp_dist(self, desc1, desc2):
ct = np.exp(-2 * (self.T + 1 / self.T))
dist = np.amax(ct * np.abs(desc1 - desc2))
return dist
class CompareTDA(CompareBase):
def __init__(self, bnum, *args):
super().__init__(*args)
ripser = __import__('ripser')
self.rips = ripser.Rips(maxdim=2)
self.persim = __import__('persim')
self.bnum = bnum
def _comp_desc(self, data):
desc = self.rips.fit_transform(data)[self.bnum]
return desc
def _comp_dist(self, desc1, desc2):
dist = self.persim.bottleneck(desc1, desc2)
return dist
class CompareGS(CompareBase):
def __init__(self, *args):
super().__init__(*args)
gs = __import__('gs')
self.gs = gs
self.NGS = 200 # Tori results in Figure 1(d) are with NGS=2000, reduced here for speed
def _comp_desc(self, data):
desc = self.gs.rlts(data, n=self.NGS)
return desc
def _comp_dist(self, desc1, desc2):
dist = self.gs.geom_score(desc1, desc2)
return dist
class CompareGW:
def __init__(self, iter_num, r_ratios, dict_keys):
self.ot = __import__('ot')
self.all_distances = {key: np.zeros((iter_num, len(r_ratios))) for key in dict_keys}
def comp_all_tori_dists(self, ite, i, data_2d_tor, data_2d_tor_sc, data_3d_tor, data_3d_tor_sc):
"""
Compute the distances between all tori datasets
:param ite: realization number
:param i: scale index
:param data_2d_tor, data_2d_tor_sc, data_3d_tor, data_3d_tor_sc: tori datasets organized as [samples x features]
"""
n = data_2d_tor.shape[0]
p = self.ot.unif(n)
q = self.ot.unif(n)
dist_mat_2d_tor = spat.distance.cdist(data_2d_tor, data_2d_tor)
dist_mat_2d_tor_sc = spat.distance.cdist(data_2d_tor_sc, data_2d_tor_sc)
dist_mat_3d_tor = spat.distance.cdist(data_3d_tor, data_3d_tor)
dist_mat_3d_tor_sc = spat.distance.cdist(data_3d_tor_sc, data_3d_tor_sc)
self.all_distances['t2D_2DSc'][ite, i] = self.ot.gromov_wasserstein2(dist_mat_2d_tor, dist_mat_2d_tor_sc, p, q)
self.all_distances['t2D_3D'][ite, i] = self.ot.gromov_wasserstein2(dist_mat_2d_tor, dist_mat_3d_tor, p, q)
self.all_distances['t2D_3DSc'][ite, i] = self.ot.gromov_wasserstein2(dist_mat_2d_tor, dist_mat_3d_tor_sc, p, q)
self.all_distances['t3D_2DSc'][ite, i] = self.ot.gromov_wasserstein2(dist_mat_3d_tor, dist_mat_2d_tor_sc, p, q)
self.all_distances['t3D_3DSc'][ite, i] = self.ot.gromov_wasserstein2(dist_mat_3d_tor, dist_mat_3d_tor_sc, p, q)
class CompareIMDOurApproach:
def __init__(self, gamma, iter_num, r_ratios, dict_keys):
self.T = np.logspace(-1, 1, 256) # Temperatures for heat kernel approx.
self.gamma = gamma
self.all_distances = {key: np.zeros((iter_num, len(r_ratios))) for key in dict_keys}
def _comp_desc(self, les_desc):
"""
:param les_desc: the LES descriptor of the dataset (used here in place of raw data samples)
"""
desc = np.sum((np.exp(les_desc) - self.gamma)[:, None] ** self.T, axis=0)
return desc
def _comp_dist(self, desc1, desc2):
ct = np.exp(-2 * (self.T + 1 / self.T))
dist = np.amax(ct * np.abs(desc1 - desc2))
return dist
def comp_all_tori_dists(self, ite, i, les_2d_tor, les_2d_tor_sc, les_3d_tor, les_3d_tor_sc):
desc_2d_tor = self._comp_desc(les_2d_tor)
desc_2d_tor_sc = self._comp_desc(les_2d_tor_sc)
desc_3d_tor = self._comp_desc(les_3d_tor)
desc_3d_tor_sc = self._comp_desc(les_3d_tor_sc)
self.all_distances['t2D_2DSc'][ite, i] = self._comp_dist(desc_2d_tor, desc_2d_tor_sc)
self.all_distances['t2D_3D'][ite, i] = self._comp_dist(desc_2d_tor, desc_3d_tor)
self.all_distances['t2D_3DSc'][ite, i] = self._comp_dist(desc_2d_tor, desc_3d_tor_sc)
self.all_distances['t3D_2DSc'][ite, i] = self._comp_dist(desc_3d_tor, desc_2d_tor_sc)
self.all_distances['t3D_3DSc'][ite, i] = self._comp_dist(desc_3d_tor, desc_3d_tor_sc)
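# --- Usage sketch (editorial addition, not part of the original module). ---
# Shows how one of the comparison classes is driven. The toy point clouds below are
# placeholders for the four tori datasets generated elsewhere in the experiment code,
# and the example uses CompareGW, which assumes the POT package ('ot') is installed.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    toy_clouds = [rng.standard_normal((200, 3)) for _ in range(4)]
    keys = ['t2D_2DSc', 't2D_3D', 't2D_3DSc', 't3D_2DSc', 't3D_3DSc']
    comparer = CompareGW(iter_num=1, r_ratios=[1.0], dict_keys=keys)
    comparer.comp_all_tori_dists(0, 0, *toy_clouds)
    print(comparer.all_distances['t2D_3D'])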
| 6,554 | 39.96875 | 120 |
py
|
NSVF
|
NSVF-main/setup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
import glob
# build clib
# _ext_src_root = "fairnr/clib"
import os
_ext_src_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "fairnr/clib")
_ext_sources = glob.glob("{}/src/*.cpp".format(_ext_src_root)) + glob.glob(
"{}/src/*.cu".format(_ext_src_root)
)
_ext_headers = glob.glob("{}/include/*".format(_ext_src_root))
setup(
name='fairnr',
ext_modules=[
CUDAExtension(
name='fairnr.clib._ext',
sources=_ext_sources,
extra_compile_args={
"cxx": ["-O2", "-I{}".format("{}/include".format(_ext_src_root))],
"nvcc": ["-O2", "-I{}".format("{}/include".format(_ext_src_root))],
},
)
],
cmdclass={
'build_ext': BuildExtension
},
entry_points={
'console_scripts': [
'fairnr-render = fairnr_cli.render:cli_main',
'fairnr-train = fairseq_cli.train:cli_main'
],
},
)
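# --- Build note (editorial addition). ---
# A typical way to build the CUDA extension in development mode; assumes a working
# CUDA toolkit and a PyTorch installation compatible with it:
#   pip install -e .        # compiles fairnr/clib/src/*.cpp and *.cu into fairnr.clib._ext
#   fairnr-train --help     # console scripts registered by the entry_points above
#   fairnr-render --help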
| 1,224 | 28.878049 | 87 |
py
|
NSVF
|
NSVF-main/render.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairnr_cli.render_multigpu import cli_main
if __name__ == '__main__':
cli_main()
| 296 | 23.75 | 65 |
py
|
NSVF
|
NSVF-main/validate.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairnr_cli.validate import cli_main
if __name__ == '__main__':
cli_main()
| 289 | 23.166667 | 65 |
py
|
NSVF
|
NSVF-main/extract.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairnr_cli.extract import cli_main
if __name__ == '__main__':
cli_main()
| 288 | 23.083333 | 65 |
py
|
NSVF
|
NSVF-main/train.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys, os
from fairnr_cli.train import cli_main
from fairnr_cli.launch_slurm import launch
if __name__ == '__main__':
if os.getenv('SLURM_ARGS') is not None:
slurm_arg = eval(os.getenv('SLURM_ARGS'))
all_args = sys.argv[1:]
print(slurm_arg)
print(all_args)
launch(slurm_arg, all_args)
else:
cli_main()
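# --- Usage note (editorial addition). ---
# SLURM_ARGS is eval()'d, so it is expected to hold a Python literal (typically a dict)
# describing the slurm job; the exact keys are defined by launch() in
# fairnr_cli/launch_slurm.py and are not shown here. A purely hypothetical invocation:
#   SLURM_ARGS="{'partition': 'gpu'}" python train.py <usual fairseq training args>
# Without SLURM_ARGS the script falls back to the regular cli_main() training entry point.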
| 573 | 26.333333 | 65 |
py
|
NSVF
|
NSVF-main/examples/data/nerf_render_ori.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys, os, argparse
import json
import bpy
import mathutils
from mathutils import Vector
import numpy as np
np.random.seed(2) # fixed seed
DEBUG = False
VOXEL_NUMS = 512
VIEWS = 200
RESOLUTION = 800
RESULTS_PATH = 'rgb'
DEPTH_SCALE = 1.4
COLOR_DEPTH = 8
FORMAT = 'PNG'
RANDOM_VIEWS = True
UPPER_VIEWS = True
CIRCLE_FIXED_START = (.3,0,0)
parser = argparse.ArgumentParser(description='Renders the given obj file by rotating a camera around it.')
parser.add_argument('output', type=str, help='path where files will be saved')
argv = sys.argv
argv = argv[argv.index("--") + 1:]
args = parser.parse_args(argv)
homedir = args.output
fp = bpy.path.abspath(f"{homedir}/{RESULTS_PATH}")
def listify_matrix(matrix):
matrix_list = []
for row in matrix:
matrix_list.append(list(row))
return matrix_list
if not os.path.exists(fp):
os.makedirs(fp)
if not os.path.exists(os.path.join(homedir, "pose")):
os.mkdir(os.path.join(homedir, "pose"))
# Data to store in JSON file
out_data = {
'camera_angle_x': bpy.data.objects['Camera'].data.angle_x,
}
# Render Optimizations
bpy.context.scene.render.use_persistent_data = True
# Set up rendering of depth map.
bpy.context.scene.use_nodes = True
tree = bpy.context.scene.node_tree
links = tree.links
# Add passes for additionally dumping albedo and normals.
#bpy.context.scene.view_layers["RenderLayer"].use_pass_normal = True
bpy.context.scene.render.image_settings.file_format = str(FORMAT)
bpy.context.scene.render.image_settings.color_depth = str(COLOR_DEPTH)
if not DEBUG:
# Create input render layer node.
render_layers = tree.nodes.new('CompositorNodeRLayers')
depth_file_output = tree.nodes.new(type="CompositorNodeOutputFile")
depth_file_output.label = 'Depth Output'
if FORMAT == 'OPEN_EXR':
links.new(render_layers.outputs['Depth'], depth_file_output.inputs[0])
else:
# Remap as other types can not represent the full range of depth.
map = tree.nodes.new(type="CompositorNodeMapValue")
# Size is chosen kind of arbitrarily, try out until you're satisfied with resulting depth map.
map.offset = [-0.7]
map.size = [DEPTH_SCALE]
map.use_min = True
map.min = [0]
links.new(render_layers.outputs['Depth'], map.inputs[0])
links.new(map.outputs[0], depth_file_output.inputs[0])
normal_file_output = tree.nodes.new(type="CompositorNodeOutputFile")
normal_file_output.label = 'Normal Output'
links.new(render_layers.outputs['Normal'], normal_file_output.inputs[0])
# Background
bpy.context.scene.render.dither_intensity = 0.0
bpy.context.scene.render.film_transparent = True
# Create collection for objects not to render with background
objs = [ob for ob in bpy.context.scene.objects if ob.type == 'EMPTY' and 'Empty' in ob.name]
bpy.ops.object.delete({"selected_objects": objs})
# bounding box
for obj in bpy.context.scene.objects:
if 'Camera' not in obj.name:
bbox = [obj.matrix_world @ Vector(corner) for corner in obj.bound_box]
bbox = [min([bb[i] for bb in bbox]) for i in range(3)] + \
[max([bb[i] for bb in bbox]) for i in range(3)]
voxel_size = ((bbox[3]-bbox[0]) * (bbox[4]-bbox[1]) * (bbox[5]-bbox[2]) / VOXEL_NUMS) ** (1/3)
print(" ".join(['{:.5f}'.format(f) for f in bbox + [voxel_size]]),
file=open(os.path.join(homedir, 'bbox.txt'), 'w'))
def parent_obj_to_camera(b_camera):
origin = (0, 0, 0)
b_empty = bpy.data.objects.new("Empty", None)
b_empty.location = origin
b_camera.parent = b_empty # setup parenting
scn = bpy.context.scene
scn.collection.objects.link(b_empty)
bpy.context.view_layer.objects.active = b_empty
# scn.objects.active = b_empty
return b_empty
scene = bpy.context.scene
scene.render.resolution_x = RESOLUTION
scene.render.resolution_y = RESOLUTION
scene.render.resolution_percentage = 100
cam = scene.objects['Camera']
cam.location = (4, -4, 4)
cam_constraint = cam.constraints.new(type='TRACK_TO')
cam_constraint.track_axis = 'TRACK_NEGATIVE_Z'
cam_constraint.up_axis = 'UP_Y'
b_empty = parent_obj_to_camera(cam)
cam_constraint.target = b_empty
scene.render.image_settings.file_format = 'PNG' # set output format to .png
from math import radians
stepsize = 360.0 / VIEWS
rotation_mode = 'XYZ'
if not DEBUG:
for output_node in [depth_file_output, normal_file_output]:
output_node.base_path = ''
out_data['frames'] = []
if not RANDOM_VIEWS:
b_empty.rotation_euler = CIRCLE_FIXED_START
for i in range(0, VIEWS):
if DEBUG:
i = np.random.randint(0,VIEWS)
b_empty.rotation_euler[2] += radians(stepsize*i)
if RANDOM_VIEWS:
scene.render.filepath = os.path.join(fp, '{:04d}'.format(i))
if UPPER_VIEWS:
rot = np.random.uniform(0, 1, size=3) * (1,0,2*np.pi)
rot[0] = np.abs(np.arccos(1 - 2 * rot[0]) - np.pi/2)
b_empty.rotation_euler = rot
else:
b_empty.rotation_euler = np.random.uniform(0, 2*np.pi, size=3)
else:
print("Rotation {}, {}".format((stepsize * i), radians(stepsize * i)))
scene.render.filepath = os.path.join(fp, '{:04d}'.format(i))
# depth_file_output.file_slots[0].path = scene.render.filepath + "_depth_"
# normal_file_output.file_slots[0].path = scene.render.filepath + "_normal_"
print('BEFORE RENDER')
if DEBUG:
break
else:
bpy.ops.render.render(write_still=True) # render still
print('AFTER RENDER')
frame_data = {
'file_path': scene.render.filepath,
'rotation': radians(stepsize),
'transform_matrix': listify_matrix(cam.matrix_world)
}
with open(os.path.join(homedir, "pose", '{:04d}.txt'.format(i)), 'w') as fo:
for ii, pose in enumerate(frame_data['transform_matrix']):
print(" ".join([str(-p) if (((j == 2) | (j == 1)) and (ii < 3)) else str(p)
for j, p in enumerate(pose)]),
file=fo)
out_data['frames'].append(frame_data)
if RANDOM_VIEWS:
if UPPER_VIEWS:
rot = np.random.uniform(0, 1, size=3) * (1,0,2*np.pi)
rot[0] = np.abs(np.arccos(1 - 2 * rot[0]) - np.pi/2)
b_empty.rotation_euler = rot
else:
b_empty.rotation_euler = np.random.uniform(0, 2*np.pi, size=3)
else:
b_empty.rotation_euler[2] += radians(stepsize)
if not DEBUG:
with open(os.path.join(homedir, 'transforms.json'), 'w') as out_file:
json.dump(out_data, out_file, indent=4)
# save camera data
H, W = RESOLUTION, RESOLUTION
f = .5 * W / np.tan(.5 * float(out_data['camera_angle_x']))
cx = cy = W // 2
# write intrinsics
with open(os.path.join(homedir, 'intrinsics.txt'), 'w') as fi:
print("{} {} {} 0.".format(f, cx, cy), file=fi)
print("0. 0. 0.", file=fi)
print("0.", file=fi)
print("1.", file=fi)
print("{} {}".format(H, W), file=fi)
| 7,120 | 32.275701 | 102 |
py
|
NSVF
|
NSVF-main/fairnr/renderer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This file simulates the "generator" in fairseq.
"""
import os, tempfile, shutil, glob
import time
import torch
import numpy as np
import logging
import imageio
from torchvision.utils import save_image
from fairnr.data import trajectory, geometry, data_utils
from fairseq.meters import StopwatchMeter
from fairnr.data.data_utils import recover_image, get_uv, parse_views
from pathlib import Path
logger = logging.getLogger(__name__)
class NeuralRenderer(object):
def __init__(self,
resolution="512x512",
frames=501,
speed=5,
raymarching_steps=None,
path_gen=None,
beam=10,
at=(0,0,0),
up=(0,1,0),
output_dir=None,
output_type=None,
fps=24,
test_camera_poses=None,
test_camera_intrinsics=None,
test_camera_views=None):
self.frames = frames
self.speed = speed
self.raymarching_steps = raymarching_steps
self.path_gen = path_gen
if isinstance(resolution, str):
self.resolution = [int(r) for r in resolution.split('x')]
else:
self.resolution = [resolution, resolution]
self.beam = beam
self.output_dir = output_dir
self.output_type = output_type
self.at = at
self.up = up
self.fps = fps
if self.path_gen is None:
self.path_gen = trajectory.circle()
if self.output_type is None:
self.output_type = ["rgb"]
if test_camera_intrinsics is not None:
self.test_int = data_utils.load_intrinsics(test_camera_intrinsics)
else:
self.test_int = None
self.test_frameids = None
if test_camera_poses is not None:
if os.path.isdir(test_camera_poses):
self.test_poses = [
np.loadtxt(f)[None, :, :] for f in sorted(glob.glob(test_camera_poses + "/*.txt"))]
self.test_poses = np.concatenate(self.test_poses, 0)
else:
self.test_poses = data_utils.load_matrix(test_camera_poses)
if self.test_poses.shape[1] == 17:
self.test_frameids = self.test_poses[:, -1].astype(np.int32)
self.test_poses = self.test_poses[:, :-1]
self.test_poses = self.test_poses.reshape(-1, 4, 4)
if test_camera_views is not None:
render_views = parse_views(test_camera_views)
self.test_poses = np.stack([self.test_poses[r] for r in render_views])
else:
self.test_poses = None
def generate_rays(self, t, intrinsics, img_size, inv_RT=None, action='none'):
if inv_RT is None:
cam_pos = torch.tensor(self.path_gen(t * self.speed / 180 * np.pi),
device=intrinsics.device, dtype=intrinsics.dtype)
cam_rot = geometry.look_at_rotation(cam_pos, at=self.at, up=self.up, inverse=True, cv=True)
inv_RT = cam_pos.new_zeros(4, 4)
inv_RT[:3, :3] = cam_rot
inv_RT[:3, 3] = cam_pos
inv_RT[3, 3] = 1
else:
inv_RT = torch.from_numpy(inv_RT).type_as(intrinsics)
h, w, rh, rw = img_size[0], img_size[1], img_size[2], img_size[3]
if self.test_int is not None:
uv = torch.from_numpy(get_uv(h, w, h, w)[0]).type_as(intrinsics)
intrinsics = self.test_int
else:
uv = torch.from_numpy(get_uv(h * rh, w * rw, h, w)[0]).type_as(intrinsics)
uv = uv.reshape(2, -1)
return uv, inv_RT
def parse_sample(self, sample):
if len(sample) == 1:
return sample[0], 0, self.frames
elif len(sample) == 2:
return sample[0], sample[1], self.frames
elif len(sample) == 3:
return sample[0], sample[1], sample[2]
else:
raise NotImplementedError
@torch.no_grad()
def generate(self, models, sample, **kwargs):
model = models[0]
model.eval()
logger.info("rendering starts. {}".format(model.text))
output_path = self.output_dir
image_names = []
sample, step, frames = self.parse_sample(sample)
# fix the rendering size
a = sample['size'][0,0,0] / self.resolution[0]
b = sample['size'][0,0,1] / self.resolution[1]
sample['size'][:, :, 0] /= a
sample['size'][:, :, 1] /= b
sample['size'][:, :, 2] *= a
sample['size'][:, :, 3] *= b
for shape in range(sample['shape'].size(0)):
max_step = step + frames
while step < max_step:
next_step = min(step + self.beam, max_step)
uv, inv_RT = zip(*[
self.generate_rays(
k,
sample['intrinsics'][shape],
sample['size'][shape, 0],
self.test_poses[k] if self.test_poses is not None else None)
for k in range(step, next_step)
])
if self.test_frameids is not None:
assert next_step - step == 1
ids = torch.tensor(self.test_frameids[step: next_step]).type_as(sample['id'])
else:
ids = sample['id'][shape:shape+1]
real_images = sample['full_rgb'] if 'full_rgb' in sample else sample['colors']
real_images = real_images.transpose(2, 3) if real_images.size(-1) != 3 else real_images
_sample = {
'id': ids,
'colors': torch.cat([real_images[shape:shape+1] for _ in range(step, next_step)], 1),
'intrinsics': sample['intrinsics'][shape:shape+1],
'extrinsics': torch.stack(inv_RT, 0).unsqueeze(0),
'uv': torch.stack(uv, 0).unsqueeze(0),
'shape': sample['shape'][shape:shape+1],
'view': torch.arange(
step, next_step,
device=sample['shape'].device).unsqueeze(0),
'size': torch.cat([sample['size'][shape:shape+1] for _ in range(step, next_step)], 1),
'step': step
}
with data_utils.GPUTimer() as timer:
outs = model(**_sample)
logger.info("rendering frame={}\ttotal time={:.4f}".format(step, timer.sum))
for k in range(step, next_step):
images = model.visualize(_sample, None, 0, k-step)
image_name = "{:04d}".format(k)
for key in images:
name, type = key.split('/')[0].split('_')
if type in self.output_type:
if name == 'coarse':
type = 'coarse-' + type
if name == 'target':
continue
prefix = os.path.join(output_path, type)
Path(prefix).mkdir(parents=True, exist_ok=True)
if type == 'point':
data_utils.save_point_cloud(
os.path.join(prefix, image_name + '.ply'),
images[key][:, :3].cpu().numpy(),
(images[key][:, 3:] * 255).cpu().int().numpy())
# from fairseq import pdb; pdb.set_trace()
else:
image = images[key].permute(2, 0, 1) \
if images[key].dim() == 3 else torch.stack(3*[images[key]], 0)
save_image(image, os.path.join(prefix, image_name + '.png'), format=None)
image_names.append(os.path.join(prefix, image_name + '.png'))
# save pose matrix
prefix = os.path.join(output_path, 'pose')
Path(prefix).mkdir(parents=True, exist_ok=True)
pose = self.test_poses[k] if self.test_poses is not None else inv_RT[k-step].cpu().numpy()
np.savetxt(os.path.join(prefix, image_name + '.txt'), pose)
step = next_step
logger.info("done")
return step, image_names
def save_images(self, output_files, steps=None, combine_output=True):
if not os.path.exists(self.output_dir):
os.mkdir(self.output_dir)
timestamp = time.strftime('%Y-%m-%d.%H-%M-%S',time.localtime(time.time()))
if steps is not None:
timestamp = "step_{}.".format(steps) + timestamp
if not combine_output:
for type in self.output_type:
images = [imageio.imread(file_path) for file_path in output_files if type in file_path]
# imageio.mimsave('{}/{}_{}.gif'.format(self.output_dir, type, timestamp), images, fps=self.fps)
imageio.mimwrite('{}/{}_{}.mp4'.format(self.output_dir, type, timestamp), images, fps=self.fps, quality=8)
else:
images = [[imageio.imread(file_path) for file_path in output_files if type == file_path.split('/')[-2]] for type in self.output_type]
images = [np.concatenate([images[j][i] for j in range(len(images))], 1) for i in range(len(images[0]))]
imageio.mimwrite('{}/{}_{}.mp4'.format(self.output_dir, 'full', timestamp), images, fps=self.fps, quality=8)
return timestamp
def merge_videos(self, timestamps):
logger.info("mergining mp4 files..")
timestamp = time.strftime('%Y-%m-%d.%H-%M-%S',time.localtime(time.time()))
writer = imageio.get_writer(
os.path.join(self.output_dir, 'full_' + timestamp + '.mp4'), fps=self.fps)
for timestamp in timestamps:
tempfile = os.path.join(self.output_dir, 'full_' + timestamp + '.mp4')
reader = imageio.get_reader(tempfile)
for im in reader:
writer.append_data(im)
writer.close()
for timestamp in timestamps:
tempfile = os.path.join(self.output_dir, 'full_' + timestamp + '.mp4')
os.remove(tempfile)
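# --- Usage sketch (editorial addition, not part of the original module). ---
# NeuralRenderer is normally constructed by the rendering task/CLI from the --render-*
# options (see fairnr/options.py). A hypothetical direct use, assuming `models` and
# `sample` come from a loaded checkpoint and a dataset batch:
#   renderer = NeuralRenderer(resolution="800x800", frames=90,
#                             output_dir="out", output_type=["color"])
#   step, image_names = renderer.generate(models, sample)
#   renderer.save_images(image_names, combine_output=True)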
| 10,725 | 41.904 | 145 |
py
|
NSVF
|
NSVF-main/fairnr/options.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import sys
import torch
from fairseq import options
def parse_args_and_arch(*args, **kwargs):
return options.parse_args_and_arch(*args, **kwargs)
def get_rendering_parser(default_task="single_object_rendering"):
parser = options.get_parser("Rendering", default_task)
options.add_dataset_args(parser, gen=True)
add_rendering_args(parser)
return parser
def add_rendering_args(parser):
group = parser.add_argument_group("Rendering")
options.add_common_eval_args(group)
group.add_argument("--render-beam", default=5, type=int, metavar="N",
help="beam size for parallel rendering")
group.add_argument("--render-resolution", default="512x512", type=str, metavar="N", help='if provide two numbers, means H x W')
group.add_argument("--render-angular-speed", default=1, type=float, metavar="D",
help="angular speed when rendering around the object")
group.add_argument("--render-num-frames", default=500, type=int, metavar="N")
group.add_argument("--render-path-style", default="circle", choices=["circle", "zoomin_circle", "zoomin_line"], type=str)
group.add_argument("--render-path-args", default="{'radius': 2.5, 'h': 0.0}",
help="specialized arguments for rendering paths")
group.add_argument("--render-output", default=None, type=str)
group.add_argument("--render-at-vector", default="(0,0,0)", type=str)
group.add_argument("--render-up-vector", default="(0,0,-1)", type=str)
group.add_argument("--render-output-types", nargs="+", type=str, default=["color"],
choices=["target", "color", "depth", "normal", "voxel", "predn", "point", "featn2", "vcolors"])
group.add_argument("--render-raymarching-steps", default=None, type=int)
group.add_argument("--render-save-fps", default=24, type=int)
group.add_argument("--render-combine-output", action='store_true',
help="if set, concat the images into one file.")
group.add_argument("--render-camera-poses", default=None, type=str,
help="text file saved for the testing trajectories")
group.add_argument("--render-camera-intrinsics", default=None, type=str)
group.add_argument("--render-views", type=str, default=None,
help="views sampled for rendering, you can set specific view id, or a range")
| 2,595 | 50.92 | 131 |
py
|
NSVF
|
NSVF-main/fairnr/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
class ResetTrainerException(Exception):
pass
from . import data, tasks, models, modules, criterions
| 285 | 22.833333 | 65 |
py
|