corpus_id
stringlengths 7
12
| paper_id
stringlengths 9
16
| title
stringlengths 1
261
| abstract
stringlengths 70
4.02k
| source
stringclasses 1
value | bibtex
stringlengths 208
20.9k
| citation_key
stringlengths 6
100
|
---|---|---|---|---|---|---|
arxiv-666901 | 2410.05701 | Balancing Pareto Front exploration of Non-dominated Tournament Genetic Algorithm (B-NTGA) in solving multi-objective NP-hard problems with constraints | <|reference_start|>Balancing Pareto Front exploration of Non-dominated Tournament Genetic Algorithm (B-NTGA) in solving multi-objective NP-hard problems with constraints: The paper presents a new balanced selection operator applied to the proposed Balanced Non-dominated Tournament Genetic Algorithm (B-NTGA) that actively uses archive to solve multi- and many-objective NP-hard combinatorial optimization problems with constraints. The primary motivation is to make B-NTGA more efficient in exploring Pareto Front Approximation (PFa), focusing on 'gaps' and reducing some PFa regions' sampling too frequently. Such a balancing mechanism allows B-NTGA to be more adaptive and focus on less explored PFa regions. The proposed B-NTGA is investigated on two benchmark multi- and many-objective optimization real-world problems, like Thief Traveling Problem and Multi-Skill Resource-Constrained Project Scheduling Problem. The results of experiments show that B-NTGA has a higher efficiency and better performance than state-of-the-art methods.<|reference_end|> | arxiv | @article{antkiewicz2024balancing,
title={Balancing Pareto Front exploration of Non-dominated Tournament Genetic
Algorithm (B-NTGA) in solving multi-objective NP-hard problems with
constraints},
author={Micha{\l} Antkiewicz and Pawe{\l} B. Myszkowski},
journal={Information Sciences, 2024, 667: 120400},
year={2024},
doi={10.1016/j.ins.2024.120400},
archivePrefix={arXiv},
eprint={2410.05701},
primaryClass={cs.NE}
} | antkiewicz2024balancing |
arxiv-666902 | 2410.05702 | Data Informativity for Quadratic Stabilization under Data Perturbation | <|reference_start|>Data Informativity for Quadratic Stabilization under Data Perturbation: Assessing data informativity, determining whether the measured data contains sufficient information for a specific control objective, is a fundamental challenge in data-driven control. In noisy scenarios, existing studies deal with system noise and measurement noise separately, using quadratic matrix inequalities. Moreover, the analysis of measurement noise requires restrictive assumptions on noise properties. To provide a unified framework without any restrictions, this study introduces data perturbation, a novel notion that encompasses both existing noise models. It is observed that the admissible system set with data perturbation does not meet preconditions necessary for applying the key lemma in the matrix S-procedure. Our analysis overcomes this limitation by developing an extended version of this lemma, making it applicable to data perturbation. Our results unify the existing analyses while eliminating the need for restrictive assumptions made in the measurement noise scenario.<|reference_end|> | arxiv | @article{kaminaga2024data,
title={Data Informativity for Quadratic Stabilization under Data Perturbation},
author={Taira Kaminaga and Hampei Sasahara},
journal={arXiv preprint arXiv:2410.05702},
year={2024},
archivePrefix={arXiv},
eprint={2410.05702},
primaryClass={math.OC cs.SY eess.SY}
} | kaminaga2024data |
arxiv-666903 | 2410.05707 | A First-Order Algorithm for Graph Learning from Smooth Signals Under Partial Observability | <|reference_start|>A First-Order Algorithm for Graph Learning from Smooth Signals Under Partial Observability: Learning graph structures from smooth signals is a significant problem in data science and engineering. A common challenge in real-world scenarios is the availability of only partially observed nodes. While some studies have considered hidden nodes and proposed various optimization frameworks, existing methods often lack the practical efficiency needed for large-scale networks or fail to provide theoretical convergence guarantees. In this paper, we address the problem of inferring network topologies from smooth signals with partially observed nodes. We propose a first-order algorithmic framework that includes two variants: one based on column sparsity regularization and the other on a low-rank constraint. We establish theoretical convergence guarantees and demonstrate the linear convergence rate of our algorithms. Extensive experiments on both synthetic and real-world data show that our results align with theoretical predictions, exhibiting not only linear convergence but also superior speed compared to existing methods. To the best of our knowledge, this is the first work to propose a first-order algorithmic framework for inferring network structures from smooth signals under partial observability, offering both guaranteed linear convergence and practical effectiveness for large-scale networks.<|reference_end|> | arxiv | @article{peng2024network,
title={Network Topology Inference from Smooth Signals Under Partial
Observability},
author={Chuansen Peng and Hanning Tang and Zhiguo Wang and Xiaojing Shen},
journal={arXiv preprint arXiv:2410.05707},
year={2024},
archivePrefix={arXiv},
eprint={2410.05707},
primaryClass={cs.LG}
} | peng2024network |
arxiv-666904 | 2410.05710 | PixLens: A Novel Framework for Disentangled Evaluation in Diffusion-Based Image Editing with Object Detection + SAM | <|reference_start|>PixLens: A Novel Framework for Disentangled Evaluation in Diffusion-Based Image Editing with Object Detection + SAM: Evaluating diffusion-based image-editing models is a crucial task in the field of Generative AI. Specifically, it is imperative to assess their capacity to execute diverse editing tasks while preserving the image content and realism. While recent developments in generative models have opened up previously unheard-of possibilities for image editing, conducting a thorough evaluation of these models remains a challenging and open task. The absence of a standardized evaluation benchmark, primarily due to the inherent need for a post-edit reference image for evaluation, further complicates this issue. Currently, evaluations often rely on established models such as CLIP or require human intervention for a comprehensive understanding of the performance of these image editing models. Our benchmark, PixLens, provides a comprehensive evaluation of both edit quality and latent representation disentanglement, contributing to the advancement and refinement of existing methodologies in the field.<|reference_end|> | arxiv | @article{stefanache2024pixlens:,
title={PixLens: A Novel Framework for Disentangled Evaluation in
Diffusion-Based Image Editing with Object Detection + SAM},
author={Stefan Stefanache and Llu{\'\i}s Pastor P{\'e}rez and Julen Costa Watanabe and
Ernesto Sanchez Tejedor and Thomas Hofmann and Enis Simsar},
journal={arXiv preprint arXiv:2410.05710},
year={2024},
archivePrefix={arXiv},
eprint={2410.05710},
primaryClass={cs.CV cs.AI}
} | stefanache2024pixlens: |
arxiv-666905 | 2410.05711 | Diffusion Auto-regressive Transformer for Effective Self-supervised Time Series Forecasting | <|reference_start|>Diffusion Auto-regressive Transformer for Effective Self-supervised Time Series Forecasting: Self-supervised learning has become a popular and effective approach for enhancing time series forecasting, enabling models to learn universal representations from unlabeled data. However, effectively capturing both the global sequence dependence and local detail features within time series data remains challenging. To address this, we propose a novel generative self-supervised method called TimeDART, denoting Diffusion Auto-regressive Transformer for Time series forecasting. In TimeDART, we treat time series patches as basic modeling units. Specifically, we employ an self-attention based Transformer encoder to model the dependencies of inter-patches. Additionally, we introduce diffusion and denoising mechanisms to capture the detail locality features of intra-patch. Notably, we design a cross-attention-based denoising decoder that allows for adjustable optimization difficulty in the self-supervised task, facilitating more effective self-supervised pre-training. Furthermore, the entire model is optimized in an auto-regressive manner to obtain transferable representations. Extensive experiments demonstrate that TimeDART achieves state-of-the-art fine-tuning performance compared to the most advanced competitive methods in forecasting tasks. Our code is publicly available at https://github.com/Melmaphother/TimeDART.<|reference_end|> | arxiv | @article{wang2024diffusion,
title={Diffusion Auto-regressive Transformer for Effective Self-supervised Time
Series Forecasting},
author={Daoyu Wang and Mingyue Cheng and Zhiding Liu and Qi Liu and Enhong Chen},
journal={arXiv preprint arXiv:2410.05711},
year={2024},
archivePrefix={arXiv},
eprint={2410.05711},
primaryClass={cs.LG}
} | wang2024diffusion |
arxiv-666906 | 2410.05712 | Advancing VR Simulators for Autonomous Vehicle-Pedestrian Interactions: A Focus on Multi-Entity Scenarios | <|reference_start|>Advancing VR Simulators for Autonomous Vehicle-Pedestrian Interactions: A Focus on Multi-Entity Scenarios: Recent research has increasingly focused on how autonomous vehicles (AVs) communicate with pedestrians in complex traffic situations involving multiple vehicles and pedestrians. VR is emerging as an effective tool to simulate these multi-entity scenarios, offering a safe and controlled study environment. Despite its growing use, there is a lack of thorough investigation into the effectiveness of these VR simulations, leaving a notable gap in documented insights and lessons. This research undertook a retrospective analysis of two distinct VR-based studies: one focusing on multiple AV scenarios (N=32) and the other on multiple pedestrian scenarios (N=25). Central to our examination are the participants' sense of presence and their crossing behaviour. The findings highlighted key factors that either enhance or diminish the sense of presence in each simulation, providing considerations for future improvements. Furthermore, they underscore the influence of controlled scenarios on crossing behaviour and interactions with AVs, advocating for the exploration of more natural and interactive simulations that better reflect real-world AV and pedestrian dynamics. Through this study, we set a groundwork for advancing VR simulators to study complex interactions between AVs and pedestrians.<|reference_end|> | arxiv | @article{tran2024advancing,
title={Advancing VR Simulators for Autonomous Vehicle-Pedestrian Interactions:
A Focus on Multi-Entity Scenarios},
author={Tram Thi Minh Tran and Callum Parker},
journal={arXiv preprint arXiv:2410.05712},
year={2024},
doi={10.1016/j.trf.2024.10.006},
archivePrefix={arXiv},
eprint={2410.05712},
primaryClass={cs.HC}
} | tran2024advancing |
arxiv-666907 | 2410.05714 | Enhancing Temporal Modeling of Video LLMs via Time Gating | <|reference_start|>Enhancing Temporal Modeling of Video LLMs via Time Gating: Video Large Language Models (Video LLMs) have achieved impressive performance on video-and-language tasks, such as video question answering. However, most existing Video LLMs neglect temporal information in video data, leading to struggles with temporal-aware video understanding. To address this gap, we propose a Time Gating Video LLM (TG-Vid) designed to enhance temporal modeling through a novel Time Gating module (TG). The TG module employs a time gating mechanism on its sub-modules, comprising gating spatial attention, gating temporal attention, and gating MLP. This architecture enables our model to achieve a robust understanding of temporal information within videos. Extensive evaluation of temporal-sensitive video benchmarks (i.e., MVBench, TempCompass, and NExT-QA) demonstrates that our TG-Vid model significantly outperforms the existing Video LLMs. Further, comprehensive ablation studies validate that the performance gains are attributed to the designs of our TG module. Our code is available at https://github.com/LaVi-Lab/TG-Vid.<|reference_end|> | arxiv | @article{hu2024enhancing,
title={Enhancing Temporal Modeling of Video LLMs via Time Gating},
author={Zi-Yuan Hu and Yiwu Zhong and Shijia Huang and Michael R. Lyu and Liwei Wang},
journal={arXiv preprint arXiv:2410.05714},
year={2024},
archivePrefix={arXiv},
eprint={2410.05714},
primaryClass={cs.CV cs.AI cs.CL cs.LG}
} | hu2024enhancing |
arxiv-666908 | 2410.05715 | Demonstration Based Explainable AI for Learning from Demonstration Methods | <|reference_start|>Demonstration Based Explainable AI for Learning from Demonstration Methods: Learning from Demonstration (LfD) is a powerful type of machine learning that can allow novices to teach and program robots to complete various tasks. However, the learning process for these systems may still be difficult for novices to interpret and understand, making effective teaching challenging. Explainable artificial intelligence (XAI) aims to address this challenge by explaining a system to the user. In this work, we investigate XAI within LfD by implementing an adaptive explanatory feedback system on an inverse reinforcement learning (IRL) algorithm. The feedback is implemented by demonstrating selected learnt trajectories to users. The system adapts to user teaching by categorizing and then selectively sampling trajectories shown to a user, to show a representative sample of both successful and unsuccessful trajectories. The system was evaluated through a user study with 26 participants teaching a robot a navigation task. The results of the user study demonstrated that the proposed explanatory feedback system can improve robot performance, teaching efficiency and user understanding of the robot.<|reference_end|> | arxiv | @article{gu2024demonstration,
title={Demonstration Based Explainable AI for Learning from Demonstration
Methods},
author={Morris Gu and Elizabeth Croft and Dana Kulic},
journal={arXiv preprint arXiv:2410.05715},
year={2024},
archivePrefix={arXiv},
eprint={2410.05715},
primaryClass={cs.RO}
} | gu2024demonstration |
arxiv-666909 | 2410.05717 | Advancements in Road Lane Mapping: Comparative Fine-Tuning Analysis of Deep Learning-based Semantic Segmentation Methods Using Aerial Imagery | <|reference_start|>Advancements in Road Lane Mapping: Comparative Fine-Tuning Analysis of Deep Learning-based Semantic Segmentation Methods Using Aerial Imagery: This research addresses the need for high-definition (HD) maps for autonomous vehicles (AVs), focusing on road lane information derived from aerial imagery. While Earth observation data offers valuable resources for map creation, specialized models for road lane extraction are still underdeveloped in remote sensing. In this study, we perform an extensive comparison of twelve foundational deep learning-based semantic segmentation models for road lane marking extraction from high-definition remote sensing images, assessing their performance under transfer learning with partially labeled datasets. These models were fine-tuned on the partially labeled Waterloo Urban Scene dataset, and pre-trained on the SkyScapes dataset, simulating a likely scenario of real-life model deployment under partial labeling. We observed and assessed the fine-tuning performance and overall performance. Models showed significant performance improvements after fine-tuning, with mean IoU scores ranging from 33.56% to 76.11%, and recall ranging from 66.0% to 98.96%. Transformer-based models outperformed convolutional neural networks, emphasizing the importance of model pre-training and fine-tuning in enhancing HD map development for AV navigation.<|reference_end|> | arxiv | @article{liu2024advancements,
title={Advancements in Road Lane Mapping: Comparative Fine-Tuning Analysis of
Deep Learning-based Semantic Segmentation Methods Using Aerial Imagery},
author={Willow Liu and Shuxin Qiao and Kyle Gao and Hongjie He and Michael A. Chapman and
Linlin Xu and Jonathan Li},
journal={arXiv preprint arXiv:2410.05717},
year={2024},
archivePrefix={arXiv},
eprint={2410.05717},
primaryClass={cs.CV}
} | liu2024advancements |
arxiv-666910 | 2410.05721 | Mero Nagarikta: Advanced Nepali Citizenship Data Extractor with Deep Learning-Powered Text Detection and OCR | <|reference_start|>Mero Nagarikta: Advanced Nepali Citizenship Data Extractor with Deep Learning-Powered Text Detection and OCR: Transforming text-based identity documents, such as Nepali citizenship cards, into a structured digital format poses several challenges due to the distinct characteristics of the Nepali script and minor variations in print alignment and contrast across different cards. This work proposes a robust system using YOLOv8 for accurate text object detection and an OCR algorithm based on Optimized PyTesseract. The system, implemented within the context of a mobile application, allows for the automated extraction of important textual information from both the front and the back side of Nepali citizenship cards, including names, citizenship numbers, and dates of birth. The final YOLOv8 model was accurate, with a mean average precision of 99.1% for text detection on the front and 96.1% on the back. The tested PyTesseract optimized for Nepali characters outperformed the standard OCR regarding flexibility and accuracy, extracting text from images with clean and noisy backgrounds and various contrasts. Using preprocessing steps such as converting the images into grayscale, removing noise from the images, and detecting edges further improved the system's OCR accuracy, even for low-quality photos. This work expands the current body of research in multilingual OCR and document analysis, especially for low-resource languages such as Nepali. It emphasizes the effectiveness of combining the latest object detection framework with OCR models that have been fine-tuned for practical applications.<|reference_end|> | arxiv | @article{dhakal2024mero,
title={Mero Nagarikta: Advanced Nepali Citizenship Data Extractor with Deep
Learning-Powered Text Detection and OCR},
author={Sisir Dhakal and Sujan Sigdel and Sandesh Prasad Paudel and Sharad Kumar
Ranabhat and Nabin Lamichhane},
journal={arXiv preprint arXiv:2410.05721},
year={2024},
archivePrefix={arXiv},
eprint={2410.05721},
primaryClass={cs.CV cs.AI}
} | dhakal2024mero |
arxiv-666911 | 2410.05725 | KnowledgeSG: Privacy-Preserving Synthetic Text Generation with Knowledge Distillation from Server | <|reference_start|>KnowledgeSG: Privacy-Preserving Synthetic Text Generation with Knowledge Distillation from Server: The success of large language models (LLMs) facilitate many parties to fine-tune LLMs on their own private data. However, this practice raises privacy concerns due to the memorization of LLMs. Existing solutions, such as utilizing synthetic data for substitution, struggle to simultaneously improve performance and preserve privacy. They either rely on a local model for generation, resulting in a performance decline, or take advantage of APIs, directly exposing the data to API servers. To address this issue, we propose KnowledgeSG, a novel client-server framework which enhances synthetic data quality and improves model performance while ensuring privacy. We achieve this by learning local knowledge from the private data with differential privacy (DP) and distilling professional knowledge from the server. Additionally, inspired by federated learning, we transmit models rather than data between the client and server to prevent privacy leakage. Extensive experiments in medical and financial domains demonstrate the effectiveness of KnowledgeSG. Our code is now publicly available at https://github.com/wwh0411/KnowledgeSG.<|reference_end|> | arxiv | @article{wang2024knowledgesg:,
title={KnowledgeSG: Privacy-Preserving Synthetic Text Generation with Knowledge
Distillation from Server},
author={Wenhao Wang and Xiaoyu Liang and Rui Ye and Jingyi Chai and Siheng Chen and Yanfeng
Wang},
journal={arXiv preprint arXiv:2410.05725},
year={2024},
archivePrefix={arXiv},
eprint={2410.05725},
primaryClass={cs.CR cs.AI}
} | wang2024knowledgesg: |
arxiv-666912 | 2410.05726 | Less is more: Embracing sparsity and interpolation with Esiformer for time series forecasting | <|reference_start|>Less is more: Embracing sparsity and interpolation with Esiformer for time series forecasting: Time series forecasting has played a significant role in many practical fields. But time series data generated from real-world applications always exhibits high variance and lots of noise, which makes it difficult to capture the inherent periodic patterns of the data, hurting the prediction accuracy significantly. To address this issue, we propose the Esiformer, which apply interpolation on the original data, decreasing the overall variance of the data and alleviating the influence of noise. What's more, we enhanced the vanilla transformer with a robust Sparse FFN. It can enhance the representation ability of the model effectively, and maintain the excellent robustness, avoiding the risk of overfitting compared with the vanilla implementation. Through evaluations on challenging real-world datasets, our method outperforms leading model PatchTST, reducing MSE by 6.5% and MAE by 5.8% in multivariate time series forecasting. Code is available at: https://github.com/yyg1282142265/Esiformer/tree/main.<|reference_end|> | arxiv | @article{guo2024less,
title={Less is more: Embracing sparsity and interpolation with Esiformer for
time series forecasting},
author={Yangyang Guo and Yanjun Zhao and Sizhe Dang and Tian Zhou and Liang Sun and Yi
Qian},
journal={arXiv preprint arXiv:2410.05726},
year={2024},
archivePrefix={arXiv},
eprint={2410.05726},
primaryClass={cs.LG cs.AI}
} | guo2024less |
arxiv-666913 | 2410.05728 | Reducing fuzzy relation equations via concept lattices | <|reference_start|>Reducing fuzzy relation equations via concept lattices: This paper has taken into advantage the relationship between Fuzzy Relation Equations (FRE) and Concept Lattices in order to introduce a procedure to reduce a FRE, without losing information. Specifically, attribute reduction theory in property-oriented and object-oriented concept lattices has been considered in order to present a mechanism for detecting redundant equations. As a first consequence, the computation of the whole solution set of a solvable FRE is reduced. Moreover, we will also introduce a novel method for computing approximate solutions of unsolvable FRE related to a (real) dataset with uncertainty/imprecision data.<|reference_end|> | arxiv | @article{lobo2024reducing,
title={Reducing fuzzy relation equations via concept lattices},
author={David Lobo and V{\'\i}ctor L{\'o}pez-Marchante and Jes{\'u}s Medina},
journal={Fuzzy Sets and Systems 463 (2023) 108465},
year={2024},
doi={10.1016/j.fss.2022.12.021},
archivePrefix={arXiv},
eprint={2410.05728},
primaryClass={cs.AI}
} | lobo2024reducing |
arxiv-666914 | 2410.05729 | Equi-GSPR: Equivariant SE(3) Graph Network Model for Sparse Point Cloud Registration | <|reference_start|>Equi-GSPR: Equivariant SE(3) Graph Network Model for Sparse Point Cloud Registration: Point cloud registration is a foundational task for 3D alignment and reconstruction applications. While both traditional and learning-based registration approaches have succeeded, leveraging the intrinsic symmetry of point cloud data, including rotation equivariance, has received insufficient attention. This prohibits the model from learning effectively, resulting in a requirement for more training data and increased model complexity. To address these challenges, we propose a graph neural network model embedded with a local Spherical Euclidean 3D equivariance property through SE(3) message passing based propagation. Our model is composed mainly of a descriptor module, equivariant graph layers, match similarity, and the final regression layers. Such modular design enables us to utilize sparsely sampled input points and initialize the descriptor by self-trained or pre-trained geometric feature descriptors easily. Experiments conducted on the 3DMatch and KITTI datasets exhibit the compelling and robust performance of our model compared to state-of-the-art approaches, while the model complexity remains relatively low at the same time.<|reference_end|> | arxiv | @article{kang2024equi-gspr:,
title={Equi-GSPR: Equivariant SE(3) Graph Network Model for Sparse Point Cloud
Registration},
author={Xueyang Kang and Zhaoliang Luan and Kourosh Khoshelham and Bing Wang},
journal={arXiv preprint arXiv:2410.05729},
year={2024},
doi={10.1007/978-3-031-73235-5_9},
archivePrefix={arXiv},
eprint={2410.05729},
primaryClass={cs.CV cs.AI}
} | kang2024equi-gspr: |
arxiv-666915 | 2410.05731 | Enhancing SPARQL Generation by Triplet-order-sensitive Pre-training | <|reference_start|>Enhancing SPARQL Generation by Triplet-order-sensitive Pre-training: Semantic parsing that translates natural language queries to SPARQL is of great importance for Knowledge Graph Question Answering (KGQA) systems. Although pre-trained language models like T5 have achieved significant success in the Text-to-SPARQL task, their generated outputs still exhibit notable errors specific to the SPARQL language, such as triplet flips. To address this challenge and further improve the performance, we propose an additional pre-training stage with a new objective, Triplet Order Correction (TOC), along with the commonly used Masked Language Modeling (MLM), to collectively enhance the model's sensitivity to triplet order and SPARQL syntax. Our method achieves state-of-the-art performances on three widely-used benchmarks.<|reference_end|> | arxiv | @article{su2024enhancing,
title={Enhancing SPARQL Generation by Triplet-order-sensitive Pre-training},
author={Chang Su and Jiexing Qi and He Yan and Kai Zou and Zhouhan Lin},
journal={arXiv preprint arXiv:2410.05731},
year={2024},
archivePrefix={arXiv},
eprint={2410.05731},
primaryClass={cs.IR}
} | su2024enhancing |
arxiv-666916 | 2410.05733 | Private and Communication-Efficient Federated Learning based on Differentially Private Sketches | <|reference_start|>Private and Communication-Efficient Federated Learning based on Differentially Private Sketches: Federated learning (FL) faces two primary challenges: the risk of privacy leakage due to parameter sharing and communication inefficiencies. To address these challenges, we propose DPSFL, a federated learning method that utilizes differentially private sketches. DPSFL compresses the local gradients of each client using a count sketch, thereby improving communication efficiency, while adding noise to the sketches to ensure differential privacy (DP). We provide a theoretical analysis of privacy and convergence for the proposed method. Gradient clipping is essential in DP learning to limit sensitivity and constrain the addition of noise. However, clipping introduces bias into the gradients, negatively impacting FL performance. To mitigate the impact of clipping, we propose an enhanced method, DPSFL-AC, which employs an adaptive clipping strategy. Experimental comparisons with existing techniques demonstrate the superiority of our methods concerning privacy preservation, communication efficiency, and model accuracy.<|reference_end|> | arxiv | @article{zhang2024private,
title={Private and Communication-Efficient Federated Learning based on
Differentially Private Sketches},
author={Meifan Zhang and Zhanhong Xie and Lihua Yin},
journal={arXiv preprint arXiv:2410.05733},
year={2024},
archivePrefix={arXiv},
eprint={2410.05733},
primaryClass={cs.LG cs.CR}
} | zhang2024private |
arxiv-666917 | 2410.05734 | Diminishing Exploration: A Minimalist Approach to Piecewise Stationary Multi-Armed Bandits | <|reference_start|>Diminishing Exploration: A Minimalist Approach to Piecewise Stationary Multi-Armed Bandits: The piecewise-stationary bandit problem is an important variant of the multi-armed bandit problem that further considers abrupt changes in the reward distributions. The main theme of the problem is the trade-off between exploration for detecting environment changes and exploitation of traditional bandit algorithms. While this problem has been extensively investigated, existing works either assume knowledge about the number of change points $M$ or require extremely high computational complexity. In this work, we revisit the piecewise-stationary bandit problem from a minimalist perspective. We propose a novel and generic exploration mechanism, called diminishing exploration, which eliminates the need for knowledge about $M$ and can be used in conjunction with an existing change detection-based algorithm to achieve near-optimal regret scaling. Simulation results show that despite oblivious of $M$, equipping existing algorithms with the proposed diminishing exploration generally achieves better empirical regret than the traditional uniform exploration.<|reference_end|> | arxiv | @article{li2024diminishing,
title={Diminishing Exploration: A Minimalist Approach to Piecewise Stationary
Multi-Armed Bandits},
author={Kuan-Ta Li and Ping-Chun Hsieh and Yu-Chih Huang},
journal={arXiv preprint arXiv:2410.05734},
year={2024},
archivePrefix={arXiv},
eprint={2410.05734},
primaryClass={cs.LG cs.IT math.IT}
} | li2024diminishing |
arxiv-666918 | 2410.05735 | CUBE360: Learning Cubic Field Representation for Monocular 360 Depth Estimation for Virtual Reality | <|reference_start|>CUBE360: Learning Cubic Field Representation for Monocular 360 Depth Estimation for Virtual Reality: Panoramic images provide comprehensive scene information and are suitable for VR applications. Obtaining corresponding depth maps is essential for achieving immersive and interactive experiences. However, panoramic depth estimation presents significant challenges due to the severe distortion caused by equirectangular projection (ERP) and the limited availability of panoramic RGB-D datasets. Inspired by the recent success of neural rendering, we propose a novel method, named $\mathbf{CUBE360}$, that learns a cubic field composed of multiple MPIs from a single panoramic image for $\mathbf{continuous}$ depth estimation at any view direction. Our CUBE360 employs cubemap projection to transform an ERP image into six faces and extract the MPIs for each, thereby reducing the memory consumption required for MPI processing of high-resolution data. Additionally, this approach avoids the computational complexity of handling the uneven pixel distribution inherent to equirectangular projectio. An attention-based blending module is then employed to learn correlations among the MPIs of cubic faces, constructing a cubic field representation with color and density information at various depth levels. Furthermore, a novel sampling strategy is introduced for rendering novel views from the cubic field at both cubic and planar scales. The entire pipeline is trained using photometric loss calculated from rendered views within a self-supervised learning approach, enabling training on 360 videos without depth annotations. Experiments on both synthetic and real-world datasets demonstrate the superior performance of CUBE360 compared to prior SSL methods. 
We also highlight its effectiveness in downstream applications, such as VR roaming and visual effects, underscoring CUBE360's potential to enhance immersive experiences.<|reference_end|> | arxiv | @article{chang2024cube360:,
title={CUBE360: Learning Cubic Field Representation for Monocular 360 Depth
Estimation for Virtual Reality},
author={Wenjie Chang and Hao Ai and Tianzhu Zhang and Lin Wang},
journal={arXiv preprint arXiv:2410.05735},
year={2024},
archivePrefix={arXiv},
eprint={2410.05735},
primaryClass={cs.CV}
} | chang2024cube360: |
arxiv-666919 | 2410.05737 | Thrust Microstepping via Acceleration Feedback in Quadrotor Control for Aerial Grasping of Dynamic Payload | <|reference_start|>Thrust Microstepping via Acceleration Feedback in Quadrotor Control for Aerial Grasping of Dynamic Payload: In this work, we propose an end-to-end Thrust Microstepping and Decoupled Control (TMDC) of quadrotors. TMDC focuses on precise off-centered aerial grasping of payloads dynamically, which are attached rigidly to the UAV body via a gripper contrary to the swinging payload. The dynamic payload grasping quickly changes UAV's mass, inertia etc, causing instability while performing a grasping operation in-air. We identify that to handle unknown payload grasping, the role of thrust controller is crucial. Hence, we focus on thrust control without involving system parameters such as mass etc. TMDC is based on our novel Thrust Microstepping via Acceleration Feedback (TMAF) thrust controller and Decoupled Motion Control (DMC). TMAF precisely estimates the desired thrust even at smaller loop rates while DMC decouples the horizontal and vertical motion to counteract disturbances in the case of dynamic payloads. We prove the controller's efficacy via exhaustive experiments in practically interesting and adverse real-world cases, such as fully onboard state estimation without any positioning sensor, narrow and indoor flying workspaces with intense wind turbulence, heavy payloads, non-uniform loop rates, etc. Our TMDC outperforms recent direct acceleration feedback thrust controller (DA) and geometric tracking control (GT) in flying stably for aerial grasping and achieves RMSE below 0.04m in contrast to 0.15m of DA and 0.16m of GT.<|reference_end|> | arxiv | @article{kumar2024thrust,
title={Thrust Microstepping via Acceleration Feedback in Quadrotor Control for
Aerial Grasping of Dynamic Payload},
author={Ashish Kumar and Laxmidhar Behera},
journal={IEEE Robotics \& Automation Letters, 2023},
year={2024},
archivePrefix={arXiv},
eprint={2410.05737},
primaryClass={cs.RO}
} | kumar2024thrust |
arxiv-666920 | 2410.05738 | Design, Localization, Perception, and Control for GPS-Denied Autonomous Aerial Grasping and Harvesting | <|reference_start|>Design, Localization, Perception, and Control for GPS-Denied Autonomous Aerial Grasping and Harvesting: In this paper, we present a comprehensive UAV system design to perform the highly complex task of off-centered aerial grasping. This task has several interdisciplinary research challenges which need to be addressed at once. The main design challenges are GPS-denied functionality, solely onboard computing, and avoiding off-the-shelf costly positioning systems. While in terms of algorithms, visual perception, localization, control, and grasping are the leading research problems. Hence in this paper, we make interdisciplinary contributions: (i) A detailed description of the fundamental challenges in indoor aerial grasping, (ii) a novel lightweight gripper design, (iii) a complete aerial platform design and in-lab fabrication, and (iv) localization, perception, control, grasping systems, and an end-to-end flight autonomy state-machine. Finally, we demonstrate the resulting aerial grasping system Drone-Bee achieving a high grasping rate for a highly challenging agricultural task of apple-like fruit harvesting, indoors in a vertical farming setting (Fig. 1). To our knowledge, such a system has not been previously discussed in the literature, and with its capabilities, this system pushes aerial manipulation towards 4th generation.<|reference_end|> | arxiv | @article{kumar2024design,
title={Design, Localization, Perception, and Control for GPS-Denied Autonomous
Aerial Grasping and Harvesting},
author={Ashish Kumar and Laxmidhar Behera},
journal={IEEE Robotics \& Automation Letters, 2024},
year={2024},
archivePrefix={arXiv},
eprint={2410.05738},
primaryClass={cs.RO}
} | kumar2024design
arxiv-666921 | 2410.05739 | Array2BR: An End-to-End Noise-immune Binaural Audio Synthesis from Microphone-array Signals | <|reference_start|>Array2BR: An End-to-End Noise-immune Binaural Audio Synthesis from Microphone-array Signals: Telepresence technology aims to provide an immersive virtual presence for remote conference applications, and it is extremely important to synthesize high-quality binaural audio signals for this aim. Because the ambient noise is often inevitable in practical application scenarios, it is highly desired that binaural audio signals without noise can be obtained from microphone-array signals directly. For this purpose, this paper proposes a new end-to-end noise-immune binaural audio synthesis framework from microphone-array signals, abbreviated as Array2BR, and experimental results show that binaural cues can be correctly mapped and noise can be well suppressed simultaneously using the proposed framework. Compared with existing methods, the proposed method achieved better performance in terms of both objective and subjective metric scores.<|reference_end|> | arxiv | @article{chi2024array2br:,
title={Array2BR: An End-to-End Noise-immune Binaural Audio Synthesis from
Microphone-array Signals},
author={Cheng Chi and Xiaoyu Li and Andong Li and Yuxuan Ke and Xiaodong Li
and Chengshi Zheng},
journal={arXiv preprint arXiv:2410.05739},
year={2024},
archivePrefix={arXiv},
eprint={2410.05739},
primaryClass={cs.SD cs.AI eess.AS}
} | chi2024array2br: |
arxiv-666922 | 2410.05740 | Learning to Race in Extreme Turning Scene with Active Exploration and Gaussian Process Regression-based MPC | <|reference_start|>Learning to Race in Extreme Turning Scene with Active Exploration and Gaussian Process Regression-based MPC: Extreme cornering in racing often induces large side-slip angles, presenting a formidable challenge in vehicle control. To tackle this issue, this paper introduces an Active Exploration with Double GPR (AEDGPR) system. The system initiates by planning a minimum-time trajectory with a Gaussian Process Regression(GPR) compensated model. The planning results show that in the cornering section, the yaw angular velocity and side-slip angle are in opposite directions, indicating that the vehicle is drifting. In response, we develop a drift controller based on Model Predictive Control (MPC) and incorporate Gaussian Process Regression to correct discrepancies in the vehicle dynamics model. Moreover, the covariance from the GPR is employed to actively explore various cornering states, aiming to minimize trajectory tracking errors. The proposed algorithm is validated through simulations on the Simulink-Carsim platform and experiments using a 1/10 scale RC vehicle.<|reference_end|> | arxiv | @article{wu2024learning,
title={Learning to Race in Extreme Turning Scene with Active Exploration and
Gaussian Process Regression-based MPC},
author={Guoqiang Wu and Cheng Hu and Wangjia Weng and Zhouheng Li and Yonghao
Fu and Lei Xie and Hongye Su},
journal={arXiv preprint arXiv:2410.05740},
year={2024},
archivePrefix={arXiv},
eprint={2410.05740},
primaryClass={cs.RO cs.AI cs.SY eess.SY}
} | wu2024learning |
arxiv-666923 | 2410.05746 | Wolf2Pack: The AutoFusion Framework for Dynamic Parameter Fusion | <|reference_start|>Wolf2Pack: The AutoFusion Framework for Dynamic Parameter Fusion: In the rapidly evolving field of deep learning, specialized models have driven significant advancements in tasks such as computer vision and natural language processing. However, this specialization leads to a fragmented ecosystem where models lack the adaptability for broader applications. To overcome this, we introduce AutoFusion, an innovative framework that fuses distinct model parameters(with the same architecture) for multi-task learning without pre-trained checkpoints. Using an unsupervised, end-to-end approach, AutoFusion dynamically permutes model parameters at each layer, optimizing the combination through a loss-minimization process that does not require labeled data. We validate AutoFusion's effectiveness through experiments on commonly used benchmark datasets, demonstrating superior performance over established methods like Weight Interpolation, Git Re-Basin, and ZipIt. Our framework offers a scalable and flexible solution for model integration, positioning it as a powerful tool for future research and practical applications.<|reference_end|> | arxiv | @article{tian2024wolf2pack:,
title={Wolf2Pack: The AutoFusion Framework for Dynamic Parameter Fusion},
author={Bowen Tian and Songning Lai and Yutao Yue},
journal={arXiv preprint arXiv:2410.05746},
year={2024},
archivePrefix={arXiv},
eprint={2410.05746},
primaryClass={cs.CV cs.LG}
} | tian2024wolf2pack: |
arxiv-666924 | 2410.05748 | Label Confidence Weighted Learning for Target-level Sentence Simplification | <|reference_start|>Label Confidence Weighted Learning for Target-level Sentence Simplification: Multi-level sentence simplification generates simplified sentences with varying language proficiency levels. We propose Label Confidence Weighted Learning (LCWL), a novel approach that incorporates a label confidence weighting scheme in the training loss of the encoder-decoder model, setting it apart from existing confidence-weighting methods primarily designed for classification. Experimentation on English grade-level simplification dataset shows that LCWL outperforms state-of-the-art unsupervised baselines. Fine-tuning the LCWL model on in-domain data and combining with Symmetric Cross Entropy (SCE) consistently delivers better simplifications compared to strong supervised methods. Our results highlight the effectiveness of label confidence weighting techniques for text simplification tasks with encoder-decoder architectures.<|reference_end|> | arxiv | @article{qiu2024label,
title={Label Confidence Weighted Learning for Target-level Sentence
Simplification},
author={Xinying Qiu and Jingshen Zhang},
journal={arXiv preprint arXiv:2410.05748},
year={2024},
archivePrefix={arXiv},
eprint={2410.05748},
primaryClass={cs.CL}
} | qiu2024label |
arxiv-666925 | 2410.05750 | Polynomial Time Cryptanalytic Extraction of Deep Neural Networks in the Hard-Label Setting | <|reference_start|>Polynomial Time Cryptanalytic Extraction of Deep Neural Networks in the Hard-Label Setting: Deep neural networks (DNNs) are valuable assets, yet their public accessibility raises security concerns about parameter extraction by malicious actors. Recent work by Carlini et al. (crypto'20) and Canales-Mart\'inez et al. (eurocrypt'24) has drawn parallels between this issue and block cipher key extraction via chosen plaintext attacks. Leveraging differential cryptanalysis, they demonstrated that all the weights and biases of black-box ReLU-based DNNs could be inferred using a polynomial number of queries and computational time. However, their attacks relied on the availability of the exact numeric value of output logits, which allowed the calculation of their derivatives. To overcome this limitation, Chen et al. (asiacrypt'24) tackled the more realistic hard-label scenario, where only the final classification label (e.g., "dog" or "car") is accessible to the attacker. They proposed an extraction method requiring a polynomial number of queries but an exponential execution time. In addition, their approach was applicable only to a restricted set of architectures, could deal only with binary classifiers, and was demonstrated only on tiny neural networks with up to four neurons split among up to two hidden layers. This paper introduces new techniques that, for the first time, achieve cryptanalytic extraction of DNN parameters in the most challenging hard-label setting, using both a polynomial number of queries and polynomial time. We validate our approach by extracting nearly one million parameters from a DNN trained on the CIFAR-10 dataset, comprising 832 neurons in four hidden layers. 
Our results reveal the surprising fact that all the weights of a ReLU-based DNN can be efficiently determined by analyzing only the geometric shape of its decision boundaries.<|reference_end|> | arxiv | @article{carlini2024polynomial,
title={Polynomial Time Cryptanalytic Extraction of Deep Neural Networks in the
Hard-Label Setting},
author={Nicholas Carlini and Jorge Ch{\'a}vez-Saab and Anna Hambitzer and
Francisco Rodr{\'\i}guez-Henr{\'\i}quez and Adi Shamir},
journal={arXiv preprint arXiv:2410.05750},
year={2024},
archivePrefix={arXiv},
eprint={2410.05750},
primaryClass={cs.CR cs.AI}
} | carlini2024polynomial |
arxiv-666926 | 2410.05752 | Exploring the Meaningfulness of Nearest Neighbor Search in High-Dimensional Space | <|reference_start|>Exploring the Meaningfulness of Nearest Neighbor Search in High-Dimensional Space: Dense high dimensional vectors are becoming increasingly vital in fields such as computer vision, machine learning, and large language models (LLMs), serving as standard representations for multimodal data. Now the dimensionality of these vector can exceed several thousands easily. Despite the nearest neighbor search (NNS) over these dense high dimensional vectors have been widely used for retrieval augmented generation (RAG) and many other applications, the effectiveness of NNS in such a high-dimensional space remains uncertain, given the possible challenge caused by the "curse of dimensionality." To address above question, in this paper, we conduct extensive NNS studies with different distance functions, such as $L_1$ distance, $L_2$ distance and angular-distance, across diverse embedding datasets, of varied types, dimensionality and modality. Our aim is to investigate factors influencing the meaningfulness of NNS. Our experiments reveal that high-dimensional text embeddings exhibit increased resilience as dimensionality rises to higher levels when compared to random vectors. This resilience suggests that text embeddings are less affected to the "curse of dimensionality," resulting in more meaningful NNS outcomes for practical use. Additionally, the choice of distance function has minimal impact on the relevance of NNS. Our study shows the effectiveness of the embedding-based data representation method and can offer opportunity for further optimization of dense vector-related applications.<|reference_end|> | arxiv | @article{chen2024exploring,
title={Exploring the Meaningfulness of Nearest Neighbor Search in
High-Dimensional Space},
author={Zhonghan Chen and Ruiyuan Zhang and Xi Zhao and Xiaojun Cheng and Xiaofang Zhou},
journal={arXiv preprint arXiv:2410.05752},
year={2024},
archivePrefix={arXiv},
eprint={2410.05752},
primaryClass={cs.LG cs.DB cs.IR}
} | chen2024exploring |
arxiv-666927 | 2410.05753 | Pathwise Gradient Variance Reduction with Control Variates in Variational Inference | <|reference_start|>Pathwise Gradient Variance Reduction with Control Variates in Variational Inference: Variational inference in Bayesian deep learning often involves computing the gradient of an expectation that lacks a closed-form solution. In these cases, pathwise and score-function gradient estimators are the most common approaches. The pathwise estimator is often favoured for its substantially lower variance compared to the score-function estimator, which typically requires variance reduction techniques. However, recent research suggests that even pathwise gradient estimators could benefit from variance reduction. In this work, we review existing control-variates-based variance reduction methods for pathwise gradient estimators to assess their effectiveness. Notably, these methods often rely on integrand approximations and are applicable only to simple variational families. To address this limitation, we propose applying zero-variance control variates to pathwise gradient estimators. This approach offers the advantage of requiring minimal assumptions about the variational distribution, other than being able to sample from it.<|reference_end|> | arxiv | @article{ng2024pathwise,
title={Pathwise Gradient Variance Reduction with Control Variates in
Variational Inference},
author={Kenyon Ng and Susan Wei},
journal={arXiv preprint arXiv:2410.05753},
year={2024},
archivePrefix={arXiv},
eprint={2410.05753},
primaryClass={stat.ML cs.LG stat.CO stat.ME}
} | ng2024pathwise |
arxiv-666928 | 2410.05754 | Simple Relative Deviation Bounds for Covariance and Gram Matrices | <|reference_start|>Simple Relative Deviation Bounds for Covariance and Gram Matrices: We provide non-asymptotic, relative deviation bounds for the eigenvalues of empirical covariance and gram matrices in general settings. Unlike typical uniform bounds, which may fail to capture the behavior of smaller eigenvalues, our results provide sharper control across the spectrum. Our analysis is based on a general-purpose theorem that allows one to convert existing uniform bounds into relative ones. The theorems and techniques emphasize simplicity and should be applicable across various settings.<|reference_end|> | arxiv | @article{barzilai2024simple,
title={Simple Relative Deviation Bounds for Covariance and Gram Matrices},
author={Daniel Barzilai and Ohad Shamir},
journal={arXiv preprint arXiv:2410.05754},
year={2024},
archivePrefix={arXiv},
eprint={2410.05754},
primaryClass={math.PR cs.LG math.ST stat.ML stat.TH}
} | barzilai2024simple |
arxiv-666929 | 2410.05756 | Learning the Generalizable Manipulation Skills on Soft-body Tasks via Guided Self-attention Behavior Cloning Policy | <|reference_start|>Learning the Generalizable Manipulation Skills on Soft-body Tasks via Guided Self-attention Behavior Cloning Policy: Embodied AI represents a paradigm in AI research where artificial agents are situated within and interact with physical or virtual environments. Despite the recent progress in Embodied AI, it is still very challenging to learn the generalizable manipulation skills that can handle large deformation and topological changes on soft-body objects, such as clay, water, and soil. In this work, we proposed an effective policy, namely GP2E behavior cloning policy, which can guide the agent to learn the generalizable manipulation skills from soft-body tasks, including pouring, filling, hanging, excavating, pinching, and writing. Concretely, we build our policy from three insights:(1) Extracting intricate semantic features from point cloud data and seamlessly integrating them into the robot's end-effector frame; (2) Capturing long-distance interactions in long-horizon tasks through the incorporation of our guided self-attention module; (3) Mitigating overfitting concerns and facilitating model convergence to higher accuracy levels via the introduction of our two-stage fine-tuning strategy. Through extensive experiments, we demonstrate the effectiveness of our approach by achieving the 1st prize in the soft-body track of the ManiSkill2 Challenge at the CVPR 2023 4th Embodied AI workshop. Our findings highlight the potential of our method to improve the generalization abilities of Embodied AI models and pave the way for their practical applications in real-world scenarios.<|reference_end|> | arxiv | @article{li2024learning,
title={Learning the Generalizable Manipulation Skills on Soft-body Tasks via
Guided Self-attention Behavior Cloning Policy},
author={Xuetao Li and Fang Gao and Jun Yu and Shaodong Li and Feng Shuang},
journal={arXiv preprint arXiv:2410.05756},
year={2024},
archivePrefix={arXiv},
eprint={2410.05756},
primaryClass={cs.RO cs.AI}
} | li2024learning |
arxiv-666930 | 2410.05757 | Temperature Optimization for Bayesian Deep Learning | <|reference_start|>Temperature Optimization for Bayesian Deep Learning: The Cold Posterior Effect (CPE) is a phenomenon in Bayesian Deep Learning (BDL), where tempering the posterior to a cold temperature often improves the predictive performance of the posterior predictive distribution (PPD). Although the term `CPE' suggests colder temperatures are inherently better, the BDL community increasingly recognizes that this is not always the case. Despite this, there remains no systematic method for finding the optimal temperature beyond grid search. In this work, we propose a data-driven approach to select the temperature that maximizes test log-predictive density, treating the temperature as a model parameter and estimating it directly from the data. We empirically demonstrate that our method performs comparably to grid search, at a fraction of the cost, across both regression and classification tasks. Finally, we highlight the differing perspectives on CPE between the BDL and Generalized Bayes communities: while the former primarily focuses on predictive performance of the PPD, the latter emphasizes calibrated uncertainty and robustness to model misspecification; these distinct objectives lead to different temperature preferences.<|reference_end|> | arxiv | @article{ng2024temperature,
title={Temperature Optimization for Bayesian Deep Learning},
author={Kenyon Ng and Chris van der Heide and Liam Hodgkinson and Susan Wei},
journal={arXiv preprint arXiv:2410.05757},
year={2024},
archivePrefix={arXiv},
eprint={2410.05757},
primaryClass={stat.ML cs.LG stat.CO stat.ME}
} | ng2024temperature |
arxiv-666931 | 2410.05759 | 3D UAV Trajectory Planning for IoT Data Collection via Matrix-Based Evolutionary Computation | <|reference_start|>3D UAV Trajectory Planning for IoT Data Collection via Matrix-Based Evolutionary Computation: UAVs are increasingly becoming vital tools in various wireless communication applications including internet of things (IoT) and sensor networks, thanks to their rapid and agile non-terrestrial mobility. Despite recent research, planning three-dimensional (3D) UAV trajectories over a continuous temporal-spatial domain remains challenging due to the need to solve computationally intensive optimization problems. In this paper, we study UAV-assisted IoT data collection aimed at minimizing total energy consumption while accounting for the UAV's physical capabilities, the heterogeneous data demands of IoT nodes, and 3D terrain. We propose a matrix-based differential evolution with constraint handling (MDE-CH), a computation-efficient evolutionary algorithm designed to address non-convex constrained optimization problems with several different types of constraints. Numerical evaluations demonstrate that the proposed MDE-CH algorithm provides a continuous 3D temporal-spatial UAV trajectory capable of efficiently minimizing energy consumption under various practical constraints and outperforms the conventional fly-hover-fly model for both two-dimensional (2D) and 3D trajectory planning.<|reference_end|> | arxiv | @article{sun20243d,
title={3D UAV Trajectory Planning for IoT Data Collection via Matrix-Based
Evolutionary Computation},
author={Pei-Fa Sun and Yujae Song and Kang-Yu Gao and Yu-Kai Wang and Changjun
Zhou and Sang-Woon Jeon and Jun Zhang},
journal={arXiv preprint arXiv:2410.05759},
year={2024},
archivePrefix={arXiv},
eprint={2410.05759},
primaryClass={cs.NE}
} | sun20243d |
arxiv-666932 | 2410.05760 | Training-free Diffusion Model Alignment with Sampling Demons | <|reference_start|>Training-free Diffusion Model Alignment with Sampling Demons: Aligning diffusion models with user preferences has been a key challenge. Existing methods for aligning diffusion models either require retraining or are limited to differentiable reward functions. To address these limitations, we propose a stochastic optimization approach, dubbed Demon, to guide the denoising process at inference time without backpropagation through reward functions or model retraining. Our approach works by controlling noise distribution in denoising steps to concentrate density on regions corresponding to high rewards through stochastic optimization. We provide comprehensive theoretical and empirical evidence to support and validate our approach, including experiments that use non-differentiable sources of rewards such as Visual-Language Model (VLM) APIs and human judgements. To the best of our knowledge, the proposed approach is the first inference-time, backpropagation-free preference alignment method for diffusion models. Our method can be easily integrated with existing diffusion models without further training. Our experiments show that the proposed approach significantly improves the average aesthetics scores for text-to-image generation.<|reference_end|> | arxiv | @article{yeh2024training-free,
title={Training-free Diffusion Model Alignment with Sampling Demons},
author={Po-Hung Yeh and Kuang-Huei Lee and Jun-Cheng Chen},
journal={arXiv preprint arXiv:2410.05760},
year={2024},
archivePrefix={arXiv},
eprint={2410.05760},
primaryClass={cs.CV cs.AI cs.LG math.OC stat.ML}
} | yeh2024training-free |
arxiv-666933 | 2410.05762 | Guided Self-attention: Find the Generalized Necessarily Distinct Vectors for Grain Size Grading | <|reference_start|>Guided Self-attention: Find the Generalized Necessarily Distinct Vectors for Grain Size Grading: With the development of steel materials, metallographic analysis has become increasingly important. Unfortunately, grain size analysis is a manual process that requires experts to evaluate metallographic photographs, which is unreliable and time-consuming. To resolve this problem, we propose a novel classifi-cation method based on deep learning, namely GSNets, a family of hybrid models which can effectively introduce guided self-attention for classifying grain size. Concretely, we build our models from three insights:(1) Introducing our novel guided self-attention module can assist the model in finding the generalized necessarily distinct vectors capable of retaining intricate rela-tional connections and rich local feature information; (2) By improving the pixel-wise linear independence of the feature map, the highly condensed semantic representation will be captured by the model; (3) Our novel triple-stream merging module can significantly improve the generalization capability and efficiency of the model. Experiments show that our GSNet yields a classifi-cation accuracy of 90.1%, surpassing the state-of-the-art Swin Transformer V2 by 1.9% on the steel grain size dataset, which comprises 3,599 images with 14 grain size levels. Furthermore, we intuitively believe our approach is applicable to broader ap-plications like object detection and semantic segmentation.<|reference_end|> | arxiv | @article{gao2024guided,
title={Guided Self-attention: Find the Generalized Necessarily Distinct Vectors
for Grain Size Grading},
author={Fang Gao and Xuetao Li and Jiabao Wang and Shengheng Ma and Jun Yu},
journal={arXiv preprint arXiv:2410.05762},
year={2024},
archivePrefix={arXiv},
eprint={2410.05762},
primaryClass={cs.CV}
} | gao2024guided |
arxiv-666934 | 2410.05763 | Information Discovery in e-Commerce | <|reference_start|>Information Discovery in e-Commerce: Electronic commerce, or e-commerce, is the buying and selling of goods and services, or the transmitting of funds or data online. E-commerce platforms come in many kinds, with global players such as Amazon, Airbnb, Alibaba, Booking.com, eBay, JD.com and platforms targeting specific geographic regions such as Bol.com and Flipkart.com.Information retrieval has a natural role to play in e-commerce, especially in connecting people to goods and services. Information discovery in e-commerce concerns different types of search (e.g., exploratory search vs. lookup tasks), recommender systems, and natural language processing in e-commerce portals. The rise in popularity of e-commerce sites has made research on information discovery in e-commerce an increasingly active research area. This is witnessed by an increase in publications and dedicated workshops in this space. Methods for information discovery in e-commerce largely focus on improving the effectiveness of e-commerce search and recommender systems, on enriching and using knowledge graphs to support e-commerce, and on developing innovative question answering and bot-based solutions that help to connect people to goods and services. In this survey, an overview is given of the fundamental infrastructure, algorithms, and technical solutions for information discovery in e-commerce. The topics covered include user behavior and profiling, search, recommendation, and language technology in e-commerce.<|reference_end|> | arxiv | @article{ren2024information,
title={Information Discovery in e-Commerce},
author={Zhaochun Ren and Xiangnan He and Dawei Yin and Maarten de Rijke},
journal={arXiv preprint arXiv:2410.05763},
year={2024},
archivePrefix={arXiv},
eprint={2410.05763},
primaryClass={cs.IR cs.CL}
} | ren2024information |
arxiv-666935 | 2410.05766 | StagedVulBERT: Multi-Granular Vulnerability Detection with a Novel Pre-trained Code Model | <|reference_start|>StagedVulBERT: Multi-Granular Vulnerability Detection with a Novel Pre-trained Code Model: The emergence of pre-trained model-based vulnerability detection methods has significantly advanced the field of automated vulnerability detection. However, these methods still face several challenges, such as difficulty in learning effective feature representations of statements for fine-grained predictions and struggling to process overly long code sequences. To address these issues, this study introduces StagedVulBERT, a novel vulnerability detection framework that leverages a pre-trained code language model and employs a coarse-to-fine strategy. The key innovation and contribution of our research lies in the development of the CodeBERT-HLS component within our framework, specialized in hierarchical, layered, and semantic encoding. This component is designed to capture semantics at both the token and statement levels simultaneously, which is crucial for achieving more accurate multi-granular vulnerability detection. Additionally, CodeBERT-HLS efficiently processes longer code token sequences, making it more suited to real-world vulnerability detection. Comprehensive experiments demonstrate that our method enhances the performance of vulnerability detection at both coarse- and fine-grained levels. Specifically, in coarse-grained vulnerability detection, StagedVulBERT achieves an F1 score of 92.26%, marking a 6.58% improvement over the best-performing methods. At the fine-grained level, our method achieves a Top-5% accuracy of 65.69%, which outperforms the state-of-the-art methods by up to 75.17%.<|reference_end|> | arxiv | @article{jiang2024stagedvulbert:,
title={StagedVulBERT: Multi-Granular Vulnerability Detection with a Novel
Pre-trained Code Model},
author={Yuan Jiang and Yujian Zhang and Xiaohong Su and Christoph Treude and
Tiantian Wang},
journal={arXiv preprint arXiv:2410.05766},
year={2024},
archivePrefix={arXiv},
eprint={2410.05766},
primaryClass={cs.CR cs.SE}
} | jiang2024stagedvulbert: |
arxiv-666936 | 2410.05767 | Grounding is All You Need? Dual Temporal Grounding for Video Dialog | <|reference_start|>Grounding is All You Need? Dual Temporal Grounding for Video Dialog: In the realm of video dialog response generation, the understanding of video content and the temporal nuances of conversation history are paramount. While a segment of current research leans heavily on large-scale pretrained visual-language models and often overlooks temporal dynamics, another delves deep into spatial-temporal relationships within videos but demands intricate object trajectory pre-extractions and sidelines dialog temporal dynamics. This paper introduces the Dual Temporal Grounding-enhanced Video Dialog model (DTGVD), strategically designed to merge the strengths of both dominant approaches. It emphasizes dual temporal relationships by predicting dialog turn-specific temporal regions, filtering video content accordingly, and grounding responses in both video and dialog contexts. One standout feature of DTGVD is its heightened attention to chronological interplay. By recognizing and acting upon the dependencies between different dialog turns, it captures more nuanced conversational dynamics. To further bolster the alignment between video and dialog temporal dynamics, we've implemented a list-wise contrastive learning strategy. Within this framework, accurately grounded turn-clip pairings are designated as positive samples, while less precise pairings are categorized as negative. This refined classification is then funneled into our holistic end-to-end response generation mechanism. Evaluations using AVSD@DSTC-7 and AVSD@DSTC-8 datasets underscore the superiority of our methodology.<|reference_end|> | arxiv | @article{qin2024grounding,
title={Grounding is All You Need? Dual Temporal Grounding for Video Dialog},
author={You Qin and Wei Ji and Xinze Lan and Hao Fei and Xun Yang and Dan Guo
and Roger Zimmermann and Lizi Liao},
journal={arXiv preprint arXiv:2410.05767},
year={2024},
archivePrefix={arXiv},
eprint={2410.05767},
primaryClass={cs.CV cs.AI cs.MM}
} | qin2024grounding |
arxiv-666937 | 2410.05770 | Efficient Few-shot Learning for Multi-label Classification of Scientific Documents with Many Classes | <|reference_start|>Efficient Few-shot Learning for Multi-label Classification of Scientific Documents with Many Classes: Scientific document classification is a critical task and often involves many classes. However, collecting human-labeled data for many classes is expensive and usually leads to label-scarce scenarios. Moreover, recent work has shown that sentence embedding model fine-tuning for few-shot classification is efficient, robust, and effective. In this work, we propose FusionSent (Fusion-based Sentence Embedding Fine-tuning), an efficient and prompt-free approach for few-shot classification of scientific documents with many classes. FusionSent uses available training examples and their respective label texts to contrastively fine-tune two different sentence embedding models. Afterward, the parameters of both fine-tuned models are fused to combine the complementary knowledge from the separate fine-tuning steps into a single model. Finally, the resulting sentence embedding model is frozen to embed the training instances, which are then used as input features to train a classification head. Our experiments show that FusionSent significantly outperforms strong baselines by an average of $6.0$ $F_{1}$ points across multiple scientific document classification datasets. In addition, we introduce a new dataset for multi-label classification of scientific documents, which contains 183,565 scientific articles and 130 classes from the arXiv category taxonomy. Code and data are available at https://github.com/sebischair/FusionSent.<|reference_end|> | arxiv | @article{schopf2024efficient,
title={Efficient Few-shot Learning for Multi-label Classification of Scientific
Documents with Many Classes},
author={Tim Schopf and Alexander Blatzheim and Nektarios Machner and
Florian Matthes},
journal={arXiv preprint arXiv:2410.05770},
year={2024},
archivePrefix={arXiv},
eprint={2410.05770},
primaryClass={cs.CL}
} | schopf2024efficient |
arxiv-666938 | 2410.05771 | Cefdet: Cognitive Effectiveness Network Based on Fuzzy Inference for Action Detection | <|reference_start|>Cefdet: Cognitive Effectiveness Network Based on Fuzzy Inference for Action Detection: Action detection and understanding provide the foundation for the generation and interaction of multimedia content. However, existing methods mainly focus on constructing complex relational inference networks, overlooking the judgment of detection effectiveness. Moreover, these methods frequently generate detection results with cognitive abnormalities. To solve the above problems, this study proposes a cognitive effectiveness network based on fuzzy inference (Cefdet), which introduces the concept of "cognition-based detection" to simulate human cognition. First, a fuzzy-driven cognitive effectiveness evaluation module (FCM) is established to introduce fuzzy inference into action detection. FCM is combined with human action features to simulate the cognition-based detection process, which clearly locates the position of frames with cognitive abnormalities. Then, a fuzzy cognitive update strategy (FCS) is proposed based on the FCM, which utilizes fuzzy logic to re-detect the cognition-based detection results and effectively update the results with cognitive abnormalities. Experimental results demonstrate that Cefdet exhibits superior performance against several mainstream algorithms on the public datasets, validating its effectiveness and superiority.<|reference_end|> | arxiv | @article{luo2024cefdet:,
title={{Cefdet}: Cognitive Effectiveness Network Based on Fuzzy Inference for
Action Detection},
author={Zhe Luo and Weina Fu and Shuai Liu and Saeed Anwar and Muhammad
Saqib and Sambit Bakshi and Khan Muhammad},
journal={arXiv preprint arXiv:2410.05771},
year={2024},
archivePrefix={arXiv},
eprint={2410.05771},
primaryClass={cs.CV}
} | luo2024cefdet: |
arxiv-666939 | 2410.05772 | Comparative Analysis of Novel View Synthesis and Photogrammetry for 3D Forest Stand Reconstruction and extraction of individual tree parameters | <|reference_start|>Comparative Analysis of Novel View Synthesis and Photogrammetry for 3D Forest Stand Reconstruction and extraction of individual tree parameters: Accurate and efficient 3D reconstruction of trees is crucial for forest resource assessments and management. Close-Range Photogrammetry (CRP) is commonly used for reconstructing forest scenes but faces challenges like low efficiency and poor quality. Recently, Novel View Synthesis (NVS) technologies, including Neural Radiance Fields (NeRF) and 3D Gaussian Splatting (3DGS), have shown promise for 3D plant reconstruction with limited images. However, existing research mainly focuses on small plants in orchards or individual trees, leaving uncertainty regarding their application in larger, complex forest stands. In this study, we collected sequential images of forest plots with varying complexity and performed dense reconstruction using NeRF and 3DGS. The resulting point clouds were compared with those from photogrammetry and laser scanning. Results indicate that NVS methods significantly enhance reconstruction efficiency. Photogrammetry struggles with complex stands, leading to point clouds with excessive canopy noise and incorrectly reconstructed trees, such as duplicated trunks. NeRF, while better for canopy regions, may produce errors in ground areas with limited views. The 3DGS method generates sparser point clouds, particularly in trunk areas, affecting diameter at breast height (DBH) accuracy. All three methods can extract tree height information, with NeRF yielding the highest accuracy; however, photogrammetry remains superior for DBH accuracy. 
These findings suggest that NVS methods have significant potential for 3D reconstruction of forest stands, offering valuable support for complex forest resource inventory and visualization tasks.<|reference_end|> | arxiv | @article{tian2024comparative,
title={Comparative Analysis of Novel View Synthesis and Photogrammetry for
{3D} Forest Stand Reconstruction and extraction of individual tree
parameters},
author={Guoji Tian and Chongcheng Chen and Hongyu Huang},
journal={arXiv preprint arXiv:2410.05772},
year={2024},
archivePrefix={arXiv},
eprint={2410.05772},
primaryClass={cs.CV}
} | tian2024comparative |
arxiv-666940 | 2410.05773 | GLRT-Based Metric Learning for Remote Sensing Object Retrieval | <|reference_start|>GLRT-Based Metric Learning for Remote Sensing Object Retrieval: With the improvement in the quantity and quality of remote sensing images, content-based remote sensing object retrieval (CBRSOR) has become an increasingly important topic. However, existing CBRSOR methods neglect the utilization of global statistical information during both training and test stages, which leads to the overfitting of neural networks to simple sample pairs of samples during training and suboptimal metric performance. Inspired by the Neyman-Pearson theorem, we propose a generalized likelihood ratio test-based metric learning (GLRTML) approach, which can estimate the relative difficulty of sample pairs by incorporating global data distribution information during training and test phases. This guides the network to focus more on difficult samples during the training process, thereby encourages the network to learn more discriminative feature embeddings. In addition, GLRT is a more effective than traditional metric space due to the utilization of global data distribution information. Accurately estimating the distribution of embeddings is critical for GLRTML. However, in real-world applications, there is often a distribution shift between the training and target domains, which diminishes the effectiveness of directly using the distribution estimated on training data. To address this issue, we propose the clustering pseudo-labels-based fast parameter adaptation (CPLFPA) method. CPLFPA efficiently estimates the distribution of embeddings in the target domain by clustering target domain instances and re-estimating the distribution parameters for GLRTML. We reorganize datasets for CBRSOR tasks based on fine-grained ship remote sensing image slices (FGSRSI-23) and military aircraft recognition (MAR20) datasets. 
Extensive experiments on these datasets demonstrate the effectiveness of our proposed GLRTML and CPLFPA.<|reference_end|> | arxiv | @article{zhang2024glrt-based,
title={{GLRT}-Based Metric Learning for Remote Sensing Object Retrieval},
author={Linping Zhang and Yu Liu and Xueqian Wang and Gang Li and You He},
journal={arXiv preprint arXiv:2410.05773},
year={2024},
archivePrefix={arXiv},
eprint={2410.05773},
primaryClass={cs.CV}
} | zhang2024glrt-based |
arxiv-666941 | 2410.05774 | ActionAtlas: A VideoQA Benchmark for Domain-specialized Action Recognition | <|reference_start|>ActionAtlas: A VideoQA Benchmark for Domain-specialized Action Recognition: Our world is full of varied actions and moves across specialized domains that we, as humans, strive to identify and understand. Within any single domain, actions can often appear quite similar, making it challenging for deep models to distinguish them accurately. To evaluate the effectiveness of multimodal foundation models in helping us recognize such actions, we present ActionAtlas v1.0, a multiple-choice video question answering benchmark featuring short videos across various sports. Each video in the dataset is paired with a question and four or five choices. The question pinpoints specific individuals, asking which choice "best" describes their action within a certain temporal context. Overall, the dataset includes 934 videos showcasing 580 unique actions across 56 sports, with a total of 1896 actions within choices. Unlike most existing video question answering benchmarks that only cover simplistic actions, often identifiable from a single frame, ActionAtlas focuses on intricate movements and rigorously tests the model's capability to discern subtle differences between moves that look similar within each domain. We evaluate open and proprietary foundation models on this benchmark, finding that the best model, GPT-4o, achieves a maximum accuracy of 45.52%. Meanwhile, Non-expert crowd workers, provided with action description for each choice, achieve 61.64% accuracy, where random chance is approximately 21%. Our findings with state-of-the-art models indicate that having a high frame sampling rate is important for accurately recognizing actions in ActionAtlas, a feature that some leading proprietary video models, such as Gemini, do not include in their default configuration.<|reference_end|> | arxiv | @article{salehi2024actionatlas:,
title={{ActionAtlas}: A {VideoQA} Benchmark for Domain-specialized Action
Recognition},
author={Mohammadreza Salehi and Jae Sung Park and Tanush Yadav and Aditya
Kusupati and Ranjay Krishna and Yejin Choi and Hannaneh Hajishirzi and Ali
Farhadi},
journal={NeurIPS 2024 Track Datasets and Benchmarks},
year={2024},
archivePrefix={arXiv},
eprint={2410.05774},
primaryClass={cs.CV}
} | salehi2024actionatlas: |
arxiv-666942 | 2410.05775 | Numerical Algorithms for the Reconstruction of Space-Dependent Sources in Thermoelasticity | <|reference_start|>Numerical Algorithms for the Reconstruction of Space-Dependent Sources in Thermoelasticity: This paper investigates the inverse problems of determining a space-dependent source for thermoelastic systems of type III under adequate time-averaged or final-in-time measurements and conditions on the time-dependent part of the sought source. Several numerical methods are proposed and examined, including a Landweber scheme and minimisation methods for the corresponding cost functionals, which are based on the gradient and conjugate gradient method. A shortcoming of these methods is that the values of the sought source are fixed ab initio and remain fixed during the iterations. The Sobolev gradient method is applied to overcome the possible inaccessibility of the source values at the boundary. Numerical examples are presented to discuss the different approaches and support our findings based on the implementation on the FEniCSx platform.<|reference_end|> | arxiv | @article{maes2024numerical,
title={Numerical Algorithms for the Reconstruction of Space-Dependent Sources
in Thermoelasticity},
author={Maes, Frederick and Van Bockstal, Karel},
journal={arXiv preprint arXiv:2410.05775},
year={2024},
archivePrefix={arXiv},
eprint={2410.05775},
primaryClass={math.NA cs.NA math.AP}
} | maes2024numerical |
arxiv-666943 | 2410.05776 | Viscoelasticity Estimation of Sports Prosthesis by Energy-minimizing Inverse Kinematics and Its Validation by Forward Dynamics | <|reference_start|>Viscoelasticity Estimation of Sports Prosthesis by Energy-minimizing Inverse Kinematics and Its Validation by Forward Dynamics: In this study, we present a method for estimating the viscoelasticity of a leaf-spring sports prosthesis using advanced energy minimizing inverse kinematics based on the Piece-wise Constant Strain (PCS) model to reconstruct the three-dimensional dynamic behavior. Dynamic motion analysis of the athlete and prosthesis is important to clarify the effect of prosthesis characteristics on foot function. However, three-dimensional deformation calculations of the prosthesis and viscoelasticity have rarely been investigated. In this letter, we apply the PCS model to a prosthesis deformation, which can calculate flexible deformation with low computational cost and handle kinematics and dynamics. In addition, we propose an inverse kinematics calculation method that is consistent with the material properties of the prosthesis by considering the minimization of elastic energy. Furthermore, we propose a method to estimate the viscoelasticity by solving a quadratic programming based on the measured motion capture data. The calculated strains are more reasonable than the results obtained by conventional inverse kinematics calculation. From the result of the viscoelasticity estimation, we simulate the prosthetic motion by forward dynamics calculation and confirm that this result corresponds to the measured motion. These results indicate that our approach adequately models the dynamic phenomena, including the viscoelasticity of the prosthesis.<|reference_end|> | arxiv | @article{shimane2024viscoelasticity,
title={Viscoelasticity Estimation of Sports Prosthesis by Energy-minimizing
Inverse Kinematics and Its Validation by Forward Dynamics},
author={Yuta Shimane and Taiki Ishigaki and Sunghee Kim and Ko Yamamoto},
journal={Advanced Robotics, 2024, 1-13},
year={2024},
doi={10.1080/01691864.2024.2407118},
archivePrefix={arXiv},
eprint={2410.05776},
primaryClass={cs.RO}
} | shimane2024viscoelasticity |
arxiv-666944 | 2410.05777 | Integrated Encoding and Quantization to Enhance Quanvolutional Neural Networks | <|reference_start|>Integrated Encoding and Quantization to Enhance Quanvolutional Neural Networks: Image processing is one of the most promising applications for quantum machine learning (QML). Quanvolutional Neural Networks with non-trainable parameters are the preferred solution to run on current and near future quantum devices. The typical input preprocessing pipeline for quanvolutional layers comprises of four steps: optional input binary quantization, encoding classical data into quantum states, processing the data to obtain the final quantum states, decoding quantum states back to classical outputs. In this paper we propose two ways to enhance the efficiency of quanvolutional models. First, we propose a flexible data quantization approach with memoization, applicable to any encoding method. This allows us to increase the number of quantization levels to retain more information or lower them to reduce the amount of circuit executions. Second, we introduce a new integrated encoding strategy, which combines the encoding and processing steps in a single circuit. This method allows great flexibility on several architectural parameters (e.g., number of qubits, filter size, and circuit depth) making them adjustable to quantum hardware requirements. We compare our proposed integrated model with a classical convolutional neural network and the well-known rotational encoding method, on two different classification tasks. The results demonstrate that our proposed model encoding exhibits a comparable or superior performance to the other models while requiring fewer quantum resources.<|reference_end|> | arxiv | @article{bosco2024integrated,
title={Integrated Encoding and Quantization to Enhance Quanvolutional Neural
Networks},
author={Lizzio Bosco, Daniele and Portelli, Beatrice and Serra, Giuseppe},
journal={arXiv preprint arXiv:2410.05777},
year={2024},
archivePrefix={arXiv},
eprint={2410.05777},
primaryClass={quant-ph cs.AI cs.LG}
} | bosco2024integrated |
arxiv-666945 | 2410.05778 | Song Emotion Classification of Lyrics with Out-of-Domain Data under Label Scarcity | <|reference_start|>Song Emotion Classification of Lyrics with Out-of-Domain Data under Label Scarcity: Songs have been found to profoundly impact human emotions, with lyrics having significant power to stimulate emotional changes in the audience. There is a scarcity of large, high quality in-domain datasets for lyrics-based song emotion classification (Edmonds and Sedoc, 2021; Zhou, 2022). It has been noted that in-domain training datasets are often difficult to acquire (Zhang and Miao, 2023) and that label acquisition is often limited by cost, time, and other factors (Azad et al., 2018). We examine the novel usage of a large out-of-domain dataset as a creative solution to the challenge of training data scarcity in the emotional classification of song lyrics. We find that CNN models trained on a large Reddit comments dataset achieve satisfactory performance and generalizability to lyrical emotion classification, thus giving insights into and a promising possibility in leveraging large, publicly available out-of-domain datasets for domains whose in-domain data are lacking or costly to acquire.<|reference_end|> | arxiv | @article{sakunkoo2024song,
title={Song Emotion Classification of Lyrics with Out-of-Domain Data under
Label Scarcity},
author={Jonathan Sakunkoo and Annabella Sakunkoo},
journal={arXiv preprint arXiv:2410.05778},
year={2024},
archivePrefix={arXiv},
eprint={2410.05778},
primaryClass={cs.CL cs.LG}
} | sakunkoo2024song |
arxiv-666946 | 2410.05779 | LightRAG: Simple and Fast Retrieval-Augmented Generation | <|reference_start|>LightRAG: Simple and Fast Retrieval-Augmented Generation: Retrieval-Augmented Generation (RAG) systems enhance large language models (LLMs) by integrating external knowledge sources, enabling more accurate and contextually relevant responses tailored to user needs. However, existing RAG systems have significant limitations, including reliance on flat data representations and inadequate contextual awareness, which can lead to fragmented answers that fail to capture complex inter-dependencies. To address these challenges, we propose LightRAG, which incorporates graph structures into text indexing and retrieval processes. This innovative framework employs a dual-level retrieval system that enhances comprehensive information retrieval from both low-level and high-level knowledge discovery. Additionally, the integration of graph structures with vector representations facilitates efficient retrieval of related entities and their relationships, significantly improving response times while maintaining contextual relevance. This capability is further enhanced by an incremental update algorithm that ensures the timely integration of new data, allowing the system to remain effective and responsive in rapidly changing data environments. Extensive experimental validation demonstrates considerable improvements in retrieval accuracy and efficiency compared to existing approaches. We have made our LightRAG open-source and available at the link: https://github.com/HKUDS/LightRAG.<|reference_end|> | arxiv | @article{guo2024lightrag:,
title={{LightRAG}: Simple and Fast Retrieval-Augmented Generation},
author={Zirui Guo and Lianghao Xia and Yanhua Yu and Tu Ao and Chao Huang},
journal={arXiv preprint arXiv:2410.05779},
year={2024},
archivePrefix={arXiv},
eprint={2410.05779},
primaryClass={cs.IR cs.AI}
} | guo2024lightrag: |
arxiv-666947 | 2410.05780 | GlucoBench: Curated List of Continuous Glucose Monitoring Datasets with Prediction Benchmarks | <|reference_start|>GlucoBench: Curated List of Continuous Glucose Monitoring Datasets with Prediction Benchmarks: The rising rates of diabetes necessitate innovative methods for its management. Continuous glucose monitors (CGM) are small medical devices that measure blood glucose levels at regular intervals providing insights into daily patterns of glucose variation. Forecasting of glucose trajectories based on CGM data holds the potential to substantially improve diabetes management, by both refining artificial pancreas systems and enabling individuals to make adjustments based on predictions to maintain optimal glycemic range.Despite numerous methods proposed for CGM-based glucose trajectory prediction, these methods are typically evaluated on small, private datasets, impeding reproducibility, further research, and practical adoption. The absence of standardized prediction tasks and systematic comparisons between methods has led to uncoordinated research efforts, obstructing the identification of optimal tools for tackling specific challenges. As a result, only a limited number of prediction methods have been implemented in clinical practice. To address these challenges, we present a comprehensive resource that provides (1) a consolidated repository of curated publicly available CGM datasets to foster reproducibility and accessibility; (2) a standardized task list to unify research objectives and facilitate coordinated efforts; (3) a set of benchmark models with established baseline performance, enabling the research community to objectively gauge new methods' efficacy; and (4) a detailed analysis of performance-influencing factors for model development. We anticipate these resources to propel collaborative research endeavors in the critical domain of CGM-based glucose predictions. 
Our code is available online at github.com/IrinaStatsLab/GlucoBench.<|reference_end|> | arxiv | @article{sergazinov2024glucobench:,
title={{GlucoBench}: Curated List of Continuous Glucose Monitoring Datasets
with Prediction Benchmarks},
author={Renat Sergazinov and Elizabeth Chun and Valeriya Rogovchenko and
Nathaniel Fernandes and Nicholas Kasman and Irina Gaynanova},
journal={arXiv preprint arXiv:2410.05780},
year={2024},
archivePrefix={arXiv},
eprint={2410.05780},
primaryClass={q-bio.QM cs.LG stat.AP}
} | sergazinov2024glucobench: |
arxiv-666948 | 2410.05782 | Reinforcement Learning From Imperfect Corrective Actions And Proxy Rewards | <|reference_start|>Reinforcement Learning From Imperfect Corrective Actions And Proxy Rewards: In practice, reinforcement learning (RL) agents are often trained with a possibly imperfect proxy reward function, which may lead to a human-agent alignment issue (i.e., the learned policy either converges to non-optimal performance with low cumulative rewards, or achieves high cumulative rewards but in undesired manner). To tackle this issue, we consider a framework where a human labeler can provide additional feedback in the form of corrective actions, which expresses the labeler's action preferences although this feedback may possibly be imperfect as well. In this setting, to obtain a better-aligned policy guided by both learning signals, we propose a novel value-based deep RL algorithm called Iterative learning from Corrective actions and Proxy rewards (ICoPro), which cycles through three phases: (1) Solicit sparse corrective actions from a human labeler on the agent's demonstrated trajectories; (2) Incorporate these corrective actions into the Q-function using a margin loss to enforce adherence to labeler's preferences; (3) Train the agent with standard RL losses regularized with a margin loss to learn from proxy rewards and propagate the Q-values learned from human feedback. Moreover, another novel design in our approach is to integrate pseudo-labels from the target Q-network to reduce human labor and further stabilize training. We experimentally validate our proposition on a variety of tasks (Atari games and autonomous driving on highway). On the one hand, using proxy rewards with different levels of imperfection, our method can better align with human preferences and is more sample-efficient than baseline methods. 
On the other hand, facing corrective actions with different types of imperfection, our method can overcome the non-optimality of this feedback thanks to the guidance from proxy reward.<|reference_end|> | arxiv | @article{jiang2024reinforcement,
title={Reinforcement Learning From Imperfect Corrective Actions And Proxy
Rewards},
author={Zhaohui Jiang and Xuening Feng and Paul Weng and Yifei Zhu and Yan
Song and Tianze Zhou and Yujing Hu and Tangjie Lv and Changjie Fan},
journal={arXiv preprint arXiv:2410.05782},
year={2024},
archivePrefix={arXiv},
eprint={2410.05782},
primaryClass={cs.LG}
} | jiang2024reinforcement |
arxiv-666949 | 2410.05785 | Contextual Bandits with Non-Stationary Correlated Rewards for User Association in MmWave Vehicular Networks | <|reference_start|>Contextual Bandits with Non-Stationary Correlated Rewards for User Association in MmWave Vehicular Networks: Millimeter wave (mmWave) communication has emerged as a propelling technology in vehicular communication. Usually, an appropriate decision on user association requires timely channel information between vehicles and base stations (BSs), which is challenging given a fast-fading mmWave vehicular channel. In this paper, relying solely on learning transmission rate, we propose a low-complexity semi-distributed contextual correlated upper confidence bound (SD-CC-UCB) algorithm to establish an up-to-date user association without explicit measurement of channel state information (CSI). Under a contextual multi-arm bandits framework, SD-CC-UCB learns and predicts the transmission rate given the location and velocity of the vehicle, which can adequately capture the intricate channel condition for a prompt decision on user association. Further, SD-CC-UCB efficiently identifies the set of candidate BSs which probably support supreme transmission rate by leveraging the correlated distributions of transmission rates on different locations. To further refine the learning transmission rate over the link to candidate BSs, each vehicle deploys the Thompson Sampling algorithm by taking the interference among vehicles and handover overhead into consideration. Numerical results show that our proposed algorithm achieves the network throughput within 100%-103% of a benchmark algorithm which requires perfect instantaneous CSI, demonstrating the effectiveness of SD-CC-UCB in vehicular communications.<|reference_end|> | arxiv | @article{he2024contextual,
title={Contextual Bandits with Non-Stationary Correlated Rewards for User
Association in MmWave Vehicular Networks},
author={Xiaoyang He and Xiaoxia Huang and Lanhua Li},
journal={arXiv preprint arXiv:2410.05785},
year={2024},
archivePrefix={arXiv},
eprint={2410.05785},
primaryClass={cs.LG}
} | he2024contextual |
arxiv-666950 | 2410.05786 | Enhanced Feature Based Granular Ball Twin Support Vector Machine | <|reference_start|>Enhanced Feature Based Granular Ball Twin Support Vector Machine: In this paper, we propose enhanced feature based granular ball twin support vector machine (EF-GBTSVM). EF-GBTSVM employs the coarse granularity of granular balls (GBs) as input rather than individual data samples. The GBs are mapped to the feature space of the hidden layer using random projection followed by the utilization of a non-linear activation function. The concatenation of original and hidden features derived from the centers of GBs gives rise to an enhanced feature space, commonly referred to as the random vector functional link (RVFL) space. This space encapsulates nuanced feature information to GBs. Further, we employ twin support vector machine (TSVM) in the RVFL space for classification. TSVM generates the two non-parallel hyperplanes in the enhanced feature space, which improves the generalization performance of the proposed EF-GBTSVM model. Moreover, the coarser granularity of the GBs enables the proposed EF-GBTSVM model to exhibit robustness to resampling, showcasing reduced susceptibility to the impact of noise and outliers. We undertake a thorough evaluation of the proposed EF-GBTSVM model on benchmark UCI and KEEL datasets. This evaluation encompasses scenarios with and without the inclusion of label noise. Moreover, experiments using NDC datasets further emphasize the proposed model's ability to handle large datasets. Experimental results, supported by thorough statistical analyses, demonstrate that the proposed EF-GBTSVM model significantly outperforms the baseline models in terms of generalization capabilities, scalability, and robustness. The source code for the proposed EF-GBTSVM model, along with additional results and further details, can be accessed at https://github.com/mtanveer1/EF-GBTSVM.<|reference_end|> | arxiv | @article{quadir2024enhanced,
title={Enhanced Feature Based Granular Ball Twin Support Vector Machine},
author={A. Quadir and M. Sajid and Mushir Akhtar and M. Tanveer and P. N.
Suganthan},
journal={27th International Conference on Pattern Recognition (ICPR), 2024},
year={2024},
archivePrefix={arXiv},
eprint={2410.05786},
primaryClass={cs.LG}
} | quadir2024enhanced |
arxiv-666951 | 2410.05787 | An Adaptive Dual-Domain Prediction Strategy based on Second-order Derivatives for Dynamic Multi-Objective Optimization | <|reference_start|>An Adaptive Dual-Domain Prediction Strategy based on Second-order Derivatives for Dynamic Multi-Objective Optimization: This paper addresses the problem of dynamic multi-objective optimization problems (DMOPs), by demonstrating new approaches to change prediction strategies within an evolutionary algorithm paradigm. Because the objectives of such problems change over time, the Pareto optimal set (PS) and Pareto optimal front (PF) are also dynamic. To accurately track the changing PS and PF in the decision and objective spaces, we propose a novel adaptive prediction strategy, which utilizes the concept of second-order derivatives adaptively in different domains. %to deal with DMOPs. Firstly, the changes in both the PS and the PF are considered in this paper, which makes the proposed a dual-domain based method. Firstly, we propose a dual-domain method, which takes into account changes in both the PS and the PF simultaneously. An adaptive strategy is adopted to self-adjust the proportion of the search space. Secondly, a second-order derivative prediction strategy is proposed to predicatively re-initialize the population. We compare the performance of the proposed algorithm against four other state-of-the-art algorithms from the literature, using DMOPs benchmark problems. Experimental results show that the proposed method outperforms the other algorithms on most of the test problems.<|reference_end|> | arxiv | @article{lei2024an,
title={An accelerate Prediction Strategy for Dynamic Multi-Objective
Optimization},
author={Ru Lei and Lin Li and Rustam Stolkin and Bin Feng},
journal={arXiv preprint arXiv:2410.05787},
year={2024},
archivePrefix={arXiv},
eprint={2410.05787},
primaryClass={cs.NE}
} | lei2024an |
arxiv-666952 | 2410.05789 | Hybrid Gripper with Passive Pneumatic Soft Joints for Grasping Deformable Thin Objects | <|reference_start|>Hybrid Gripper with Passive Pneumatic Soft Joints for Grasping Deformable Thin Objects: Grasping a variety of objects remains a key challenge in the development of versatile robotic systems. The human hand is remarkably dexterous, capable of grasping and manipulating objects with diverse shapes, mechanical properties, and textures. Inspired by how humans use two fingers to pick up thin and large objects such as fabric or sheets of paper, we aim to develop a gripper optimized for grasping such deformable objects. Observing how the soft and flexible fingertip joints of the hand approach and grasp thin materials, a hybrid gripper design that incorporates both soft and rigid components was proposed. The gripper utilizes a soft pneumatic ring wrapped around a rigid revolute joint to create a flexible two-fingered gripper. Experiments were conducted to characterize and evaluate the gripper performance in handling sheets of paper and other objects. Compared to rigid grippers, the proposed design improves grasping efficiency and reduces the gripping distance by up to eightfold.<|reference_end|> | arxiv | @article{tran2024hybrid,
title={Hybrid Gripper with Passive Pneumatic Soft Joints for Grasping
Deformable Thin Objects},
author={Ngoc-Duy Tran and Hoang-Hiep Ly and Xuan-Thuan Nguyen and Thi-Thoa
Mac and Anh Nguyen and Tung D. Ta},
journal={arXiv preprint arXiv:2410.05789},
year={2024},
archivePrefix={arXiv},
eprint={2410.05789},
primaryClass={cs.RO}
} | tran2024hybrid |
arxiv-666953 | 2410.05791 | F\"urElise: Capturing and Physically Synthesizing Hand Motions of Piano Performance | <|reference_start|>F\"urElise: Capturing and Physically Synthesizing Hand Motions of Piano Performance: Piano playing requires agile, precise, and coordinated hand control that stretches the limits of dexterity. Hand motion models with the sophistication to accurately recreate piano playing have a wide range of applications in character animation, embodied AI, biomechanics, and VR/AR. In this paper, we construct a first-of-its-kind large-scale dataset that contains approximately 10 hours of 3D hand motion and audio from 15 elite-level pianists playing 153 pieces of classical music. To capture natural performances, we designed a markerless setup in which motions are reconstructed from multi-view videos using state-of-the-art pose estimation models. The motion data is further refined via inverse kinematics using the high-resolution MIDI key-pressing data obtained from sensors in a specialized Yamaha Disklavier piano. Leveraging the collected dataset, we developed a pipeline that can synthesize physically-plausible hand motions for musical scores outside of the dataset. Our approach employs a combination of imitation learning and reinforcement learning to obtain policies for physics-based bimanual control involving the interaction between hands and piano keys. To solve the sampling efficiency problem with the large motion dataset, we use a diffusion model to generate natural reference motions, which provide high-level trajectory and fingering (finger order and placement) information. However, the generated reference motion alone does not provide sufficient accuracy for piano performance modeling. We then further augmented the data by using musical similarity to retrieve similar motions from the captured dataset to boost the precision of the RL policy. 
With the proposed method, our model generates natural, dexterous motions that generalize to music from outside the training dataset.<|reference_end|> | arxiv | @article{wang2024f\"urelise:,
title={F\"urElise: Capturing and Physically Synthesizing Hand Motions of Piano
Performance},
author={Ruocheng Wang, Pei Xu, Haochen Shi, Elizabeth Schumann, C. Karen Liu},
journal={arXiv preprint arXiv:2410.05791},
year={2024},
archivePrefix={arXiv},
eprint={2410.05791},
primaryClass={cs.GR cs.AI cs.SD eess.AS}
} | wang2024f\"urelise: |
arxiv-666954 | 2410.05793 | Distributed Coordination for Multi-Vehicle Systems in the Presence of Misbehaving Vehicles | <|reference_start|>Distributed Coordination for Multi-Vehicle Systems in the Presence of Misbehaving Vehicles: The coordination problem of multi-vehicle systems is of great interests in the area of autonomous driving and multi-vehicle control. This work mainly focuses on multi-task coordination problem of a group of vehicles with a bicycle model and some specific control objectives, including collision avoidance, connectivity maintenance and convergence to desired destinations. The basic idea is to develop a proper Lyapunov-like barrier function for all tasks and a distributed controller could be built in the presence of misbehaving vehicles. Control protocols are provided for both leader vehicle and follower vehicles. The simulation results demonstrate the effectiveness of proposed method.<|reference_end|> | arxiv | @article{han2024distributed,
title={Distributed Coordination for Multi-Vehicle Systems in the Presence of
Misbehaving Vehicles},
author={Dongkun Han, Yijun Huang, Hejun Huang and Tianrui Fang},
journal={arXiv preprint arXiv:2410.05793},
year={2024},
archivePrefix={arXiv},
eprint={2410.05793},
primaryClass={eess.SY cs.SY}
} | han2024distributed |
arxiv-666955 | 2410.05796 | Software analytics for software engineering: A tertiary review | <|reference_start|>Software analytics for software engineering: A tertiary review: Software analytics (SA) is frequently proposed as a tool to support practitioners in software engineering (SE) tasks. We have observed that several secondary studies on SA have been published. Some of these studies have overlapping aims and some have even been published in the same calendar year. This presents an opportunity to analyze the congruence or divergence of the conclusions in these studies. Such an analysis can help identify broader generalizations beyond any of the individual secondary studies. We identified five secondary studies on the use of SA for SE. These secondary studies cover primary research from 2000 to 2021. Despite the overlapping objectives and search time frames of these secondary studies, there is negligible overlap of primary studies between these secondary studies. Thus, each of them provides an isolated view, and together, they provide a fragmented view, i.e., there is no ``common picture'' of the area. Thus, we conclude that an overview of the literature identified by these secondary studies would be useful in providing a more comprehensive overview of the topic.<|reference_end|> | arxiv | @article{laiq2024software,
title={Software analytics for software engineering: A tertiary review},
author={Muhammad Laiq, Nauman bin Ali, J\"urgen B\"orstler, Emelie Engstr\"om},
journal={arXiv preprint arXiv:2410.05796},
year={2024},
archivePrefix={arXiv},
eprint={2410.05796},
primaryClass={cs.SE}
} | laiq2024software |
arxiv-666956 | 2410.05797 | CodeCipher: Learning to Obfuscate Source Code Against LLMs | <|reference_start|>CodeCipher: Learning to Obfuscate Source Code Against LLMs: While large code language models have made significant strides in AI-assisted coding tasks, there are growing concerns about privacy challenges. The user code is transparent to the cloud LLM service provider, inducing risks of unauthorized training, reading, and execution of the user code. In this paper, we propose CodeCipher, a novel method that perturbs privacy from code while preserving the original response from LLMs. CodeCipher transforms the LLM's embedding matrix so that each row corresponds to a different word in the original matrix, forming a token-to-token confusion mapping for obfuscating source code. The new embedding matrix is optimized by minimizing the task-specific loss function. To tackle the challenge of the discrete and sparse nature of word vector spaces, CodeCipher adopts a discrete optimization strategy that aligns the updated vector to the nearest valid token in the vocabulary before each gradient update. We demonstrate the effectiveness of our approach on three AI-assisted coding tasks including code completion, summarization, and translation. Results show that our model successfully confuses the privacy in source code while preserving the original LLM's performance.<|reference_end|> | arxiv | @article{lin2024codecipher:,
title={CodeCipher: Learning to Obfuscate Source Code Against LLMs},
author={Yalan Lin, Chengcheng Wan, Yixiong Fang, Xiaodong Gu},
journal={arXiv preprint arXiv:2410.05797},
year={2024},
archivePrefix={arXiv},
eprint={2410.05797},
primaryClass={cs.CL}
} | lin2024codecipher: |
arxiv-666957 | 2410.05798 | Integrating Online Learning and Connectivity Maintenance for Communication-Aware Multi-Robot Coordination | <|reference_start|>Integrating Online Learning and Connectivity Maintenance for Communication-Aware Multi-Robot Coordination: This paper proposes a novel data-driven control strategy for maintaining connectivity in networked multi-robot systems. Existing approaches often rely on a pre-determined communication model specifying whether pairwise robots can communicate given their relative distance to guide the connectivity-aware control design, which may not capture real-world communication conditions. To relax that assumption, we present the concept of Data-driven Connectivity Barrier Certificates, which utilize Control Barrier Functions (CBF) and Gaussian Processes (GP) to characterize the admissible control space for pairwise robots based on communication performance observed online. This allows robots to maintain a satisfying level of pairwise communication quality (measured by the received signal strength) while in motion. Then we propose a Data-driven Connectivity Maintenance (DCM) algorithm that combines (1) online learning of the communication signal strength and (2) a bi-level optimization-based control framework for the robot team to enforce global connectivity of the realistic multi-robot communication graph and minimally deviate from their task-related motions. We provide theoretical proofs to justify the properties of our algorithm and demonstrate its effectiveness through simulations with up to 20 robots.<|reference_end|> | arxiv | @article{yang2024integrating,
title={Integrating Online Learning and Connectivity Maintenance for
Communication-Aware Multi-Robot Coordination},
author={Yupeng Yang, Yiwei Lyu, Yanze Zhang, Ian Gao, and Wenhao Luo},
journal={arXiv preprint arXiv:2410.05798},
year={2024},
archivePrefix={arXiv},
eprint={2410.05798},
primaryClass={cs.RO}
} | yang2024integrating |
arxiv-666958 | 2410.05799 | SeeClear: Semantic Distillation Enhances Pixel Condensation for Video Super-Resolution | <|reference_start|>SeeClear: Semantic Distillation Enhances Pixel Condensation for Video Super-Resolution: Diffusion-based Video Super-Resolution (VSR) is renowned for generating perceptually realistic videos, yet it grapples with maintaining detail consistency across frames due to stochastic fluctuations. The traditional approach of pixel-level alignment is ineffective for diffusion-processed frames because of iterative disruptions. To overcome this, we introduce SeeClear--a novel VSR framework leveraging conditional video generation, orchestrated by instance-centric and channel-wise semantic controls. This framework integrates a Semantic Distiller and a Pixel Condenser, which synergize to extract and upscale semantic details from low-resolution frames. The Instance-Centric Alignment Module (InCAM) utilizes video-clip-wise tokens to dynamically relate pixels within and across frames, enhancing coherency. Additionally, the Channel-wise Texture Aggregation Memory (CaTeGory) infuses extrinsic knowledge, capitalizing on long-standing semantic textures. Our method also innovates the blurring diffusion process with the ResShift mechanism, finely balancing between sharpness and diffusion effects. Comprehensive experiments confirm our framework's advantage over state-of-the-art diffusion-based VSR techniques. The code is available: https://github.com/Tang1705/SeeClear-NeurIPS24.<|reference_end|> | arxiv | @article{tang2024seeclear:,
title={SeeClear: Semantic Distillation Enhances Pixel Condensation for Video
Super-Resolution},
author={Qi Tang, Yao Zhao, Meiqin Liu, Chao Yao},
journal={arXiv preprint arXiv:2410.05799},
year={2024},
archivePrefix={arXiv},
eprint={2410.05799},
primaryClass={cs.CV}
} | tang2024seeclear: |
arxiv-666959 | 2410.05800 | Core Tokensets for Data-efficient Sequential Training of Transformers | <|reference_start|>Core Tokensets for Data-efficient Sequential Training of Transformers: Deep networks are frequently tuned to novel tasks and continue learning from ongoing data streams. Such sequential training requires consolidation of new and past information, a challenge predominantly addressed by retaining the most important data points - formally known as coresets. Traditionally, these coresets consist of entire samples, such as images or sentences. However, recent transformer architectures operate on tokens, leading to the famous assertion that an image is worth 16x16 words. Intuitively, not all of these tokens are equally informative or memorable. Going beyond coresets, we thus propose to construct a deeper-level data summary on the level of tokens. Our respectively named core tokensets both select the most informative data points and leverage feature attribution to store only their most relevant features. We demonstrate that core tokensets yield significant performance retention in incremental image classification, open-ended visual question answering, and continual image captioning with significantly reduced memory. In fact, we empirically find that a core tokenset of 1\% of the data performs comparably to at least a twice as large and up to 10 times larger coreset.<|reference_end|> | arxiv | @article{paul2024core,
title={Core Tokensets for Data-efficient Sequential Training of Transformers},
author={Subarnaduti Paul, Manuel Brack, Patrick Schramowski, Kristian
Kersting, Martin Mundt},
journal={arXiv preprint arXiv:2410.05800},
year={2024},
archivePrefix={arXiv},
eprint={2410.05800},
primaryClass={cs.CV cs.AI}
} | paul2024core |
arxiv-666960 | 2410.05801 | Retrieving, Rethinking and Revising: The Chain-of-Verification Can Improve Retrieval Augmented Generation | <|reference_start|>Retrieving, Rethinking and Revising: The Chain-of-Verification Can Improve Retrieval Augmented Generation: Recent Retrieval Augmented Generation (RAG) aims to enhance Large Language Models (LLMs) by incorporating extensive knowledge retrieved from external sources. However, such approach encounters some challenges: Firstly, the original queries may not be suitable for precise retrieval, resulting in erroneous contextual knowledge; Secondly, the language model can easily generate inconsistent answer with external references due to their knowledge boundary limitation. To address these issues, we propose the chain-of-verification (CoV-RAG) to enhance the external retrieval correctness and internal generation consistency. Specifically, we integrate the verification module into the RAG, engaging in scoring, judgment, and rewriting. To correct external retrieval errors, CoV-RAG retrieves new knowledge using a revised query. To correct internal generation errors, we unify QA and verification tasks with a Chain-of-Thought (CoT) reasoning during training. Our comprehensive experiments across various LLMs demonstrate the effectiveness and adaptability compared with other strong baselines. Especially, our CoV-RAG can significantly surpass the state-of-the-art baselines using different LLM backbones.<|reference_end|> | arxiv | @article{he2024retrieving,,
title={Retrieving, Rethinking and Revising: The Chain-of-Verification Can
Improve Retrieval Augmented Generation},
author={Bolei He, Nuo Chen, Xinran He, Lingyong Yan, Zhenkai Wei, Jinchang
Luo, Zhen-Hua Ling},
journal={arXiv preprint arXiv:2410.05801},
year={2024},
archivePrefix={arXiv},
eprint={2410.05801},
primaryClass={cs.CL cs.AI}
} | he2024retrieving, |
arxiv-666961 | 2410.05802 | Gradual Learning: Optimizing Fine-Tuning with Partially Mastered Knowledge in Large Language Models | <|reference_start|>Gradual Learning: Optimizing Fine-Tuning with Partially Mastered Knowledge in Large Language Models: During the pretraining phase, large language models (LLMs) acquire vast amounts of knowledge from extensive text corpora. Nevertheless, in later stages such as fine-tuning and inference, the model may encounter knowledge not covered in the initial training, which can lead to hallucinations and degraded performance. This issue has a profound impact on the model's capabilities, as it will inevitably face out-of-scope knowledge after pretraining. Furthermore, fine-tuning is often required to adapt LLMs to domain-specific tasks. However, this phenomenon limits the model's ability to learn and integrate new information during fine-tuning. The effectiveness of fine-tuning largely depends on the type of knowledge involved. Existing research suggests that fine-tuning the model on partially mastered knowledge-for instance, question-answer pairs where the model has a chance of providing correct responses under non-greedy decoding-can enable the model to acquire new knowledge while mitigating hallucination. Notably, this approach can still lead to the forgetting of fully mastered knowledge, constraining the fine-tuning dataset to a narrower range and limiting the model's overall potential for improvement. Given the model's intrinsic reasoning abilities and the interconnectedness of different knowledge areas, it is likely that as the model's capacity to utilize existing knowledge improves during fine-tuning, previously unmastered knowledge may become more understandable. To explore this hypothesis, we conducted experiments and, based on the results, proposed a two-stage fine-tuning strategy. 
This approach not only improves the model's overall test accuracy and knowledge retention but also preserves its accuracy on previously mastered content. When fine-tuning on the WikiQA dataset, our method increases the amount of knowledge acquired by the model in this stage by 24%.<|reference_end|> | arxiv | @article{li2024gradual,
title={Gradual Learning: Optimizing Fine-Tuning with Partially Mastered
Knowledge in Large Language Models},
author={Bozhou Li, Hao Liang, Yang Li, Fangcheng Fu, Hongzhi Yin, Conghui He,
Wentao Zhang},
journal={arXiv preprint arXiv:2410.05802},
year={2024},
archivePrefix={arXiv},
eprint={2410.05802},
primaryClass={cs.CL}
} | li2024gradual |
arxiv-666962 | 2410.05804 | CASA: Class-Agnostic Shared Attributes in Vision-Language Models for Efficient Incremental Object Detection | <|reference_start|>CASA: Class-Agnostic Shared Attributes in Vision-Language Models for Efficient Incremental Object Detection: Incremental object detection (IOD) is challenged by background shift, where background categories in sequential data may include previously learned or future classes. Inspired by the vision-language foundation models such as CLIP, these models capture shared attributes from extensive image-text paired data during pre-training. We propose a novel method utilizing attributes in vision-language foundation models for incremental object detection. Our method constructs a Class-Agnostic Shared Attribute base (CASA) to capture common semantic information among incremental classes. Specifically, we utilize large language models to generate candidate textual attributes and select the most relevant ones based on current training data, recording their significance in an attribute assignment matrix. For subsequent tasks, we freeze the retained attributes and continue selecting from the remaining candidates while updating the attribute assignment matrix accordingly. Furthermore, we employ OWL-ViT as our baseline, preserving the original parameters of the pre-trained foundation model. Our method adds only 0.7% to parameter storage through parameter-efficient fine-tuning to significantly enhance the scalability and adaptability of IOD. Extensive two-phase and multi-phase experiments on the COCO dataset demonstrate the state-of-the-art performance of our proposed method.<|reference_end|> | arxiv | @article{guo2024casa:,
title={CASA: Class-Agnostic Shared Attributes in Vision-Language Models for
Efficient Incremental Object Detection},
author={Mingyi Guo, Yuyang Liu, Zongying Lin, Peixi Peng and Yonghong Tian},
journal={arXiv preprint arXiv:2410.05804},
year={2024},
archivePrefix={arXiv},
eprint={2410.05804},
primaryClass={cs.CV}
} | guo2024casa: |
arxiv-666963 | 2410.05805 | PostCast: Generalizable Postprocessing for Precipitation Nowcasting via Unsupervised Blurriness Modeling | <|reference_start|>PostCast: Generalizable Postprocessing for Precipitation Nowcasting via Unsupervised Blurriness Modeling: Precipitation nowcasting plays a pivotal role in socioeconomic sectors, especially in severe convective weather warnings. Although notable progress has been achieved by approaches mining the spatiotemporal correlations with deep learning, these methods still suffer severe blurriness as the lead time increases, which hampers accurate predictions for extreme precipitation. To alleviate blurriness, researchers explore generative methods conditioned on blurry predictions. However, the pairs of blurry predictions and corresponding ground truth need to be generated in advance, making the training pipeline cumbersome and limiting the generality of generative models within blur modes that appear in training data. By rethinking the blurriness in precipitation nowcasting as a blur kernel acting on predictions, we propose an unsupervised postprocessing method to eliminate the blurriness without the requirement of training with the pairs of blurry predictions and corresponding ground truth. Specifically, we utilize blurry predictions to guide the generation process of a pre-trained unconditional denoising diffusion probabilistic model (DDPM) to obtain high-fidelity predictions with eliminated blurriness. A zero-shot blur kernel estimation mechanism and an auto-scale denoise guidance strategy are introduced to adapt the unconditional DDPM to any blurriness modes varying from datasets and lead times in precipitation nowcasting. Extensive experiments are conducted on 7 precipitation radar datasets, demonstrating the generality and superiority of our method.<|reference_end|> | arxiv | @article{gong2024postcast:,
title={PostCast: Generalizable Postprocessing for Precipitation Nowcasting via
Unsupervised Blurriness Modeling},
author={Junchao Gong, Siwei Tu, Weidong Yang, Ben Fei, Kun Chen, Wenlong
Zhang, Xiaokang Yang, Wanli Ouyang, Lei Bai},
journal={arXiv preprint arXiv:2410.05805},
year={2024},
archivePrefix={arXiv},
eprint={2410.05805},
primaryClass={cs.CV cs.AI}
} | gong2024postcast: |
arxiv-666964 | 2410.05806 | A Parameter Update Balancing Algorithm for Multi-task Ranking Models in Recommendation Systems | <|reference_start|>A Parameter Update Balancing Algorithm for Multi-task Ranking Models in Recommendation Systems: Multi-task ranking models have become essential for modern real-world recommendation systems. While most recommendation researches focus on designing sophisticated models for specific scenarios, achieving performance improvement for multi-task ranking models across various scenarios still remains a significant challenge. Training all tasks naively can result in inconsistent learning, highlighting the need for the development of multi-task optimization (MTO) methods to tackle this challenge. Conventional methods assume that the optimal joint gradient on shared parameters leads to optimal parameter updates. However, the actual update on model parameters may deviates significantly from gradients when using momentum based optimizers such as Adam, and we design and execute statistical experiments to support the observation. In this paper, we propose a novel Parameter Update Balancing algorithm for multi-task optimization, denoted as PUB. In contrast to traditional MTO method which are based on gradient level tasks fusion or loss level tasks fusion, PUB is the first work to optimize multiple tasks through parameter update balancing. Comprehensive experiments on benchmark multi-task ranking datasets demonstrate that PUB consistently improves several multi-task backbones and achieves state-of-the-art performance. Additionally, experiments on benchmark computer vision datasets show the great potential of PUB in various multi-task learning scenarios. 
Furthermore, we deployed our method for an industrial evaluation on the real-world commercial platform, HUAWEI AppGallery, where PUB significantly enhances the online multi-task ranking model, efficiently managing the primary traffic of a crucial channel.<|reference_end|> | arxiv | @article{yuan2024a,
title={A Parameter Update Balancing Algorithm for Multi-task Ranking Models in
Recommendation Systems},
author={Jun Yuan, Guohao Cai and Zhenhua Dong},
journal={arXiv preprint arXiv:2410.05806},
year={2024},
archivePrefix={arXiv},
eprint={2410.05806},
primaryClass={cs.IR cs.AI cs.LG}
} | yuan2024a |
arxiv-666965 | 2410.05807 | Extended convexity and smoothness and their applications in deep learning | <|reference_start|>Extended convexity and smoothness and their applications in deep learning: The underlying mechanism by which simple gradient-based iterative algorithms can effectively handle the non-convex problem of deep model training remains incompletely understood within the traditional convex and non-convex analysis frameworks, which often require the Lipschitz smoothness of the gradient and strong convexity. In this paper, we introduce $\mathcal{H}(\phi)$-convexity and $\mathcal{H}(\Phi)$-smoothness, which broaden the existing concepts of smoothness and convexity, and delineate their fundamental properties. Building on these concepts, we introduce the high-order gradient descent and high-order stochastic gradient descent methods, which serve as extensions to the traditional gradient descent and stochastic gradient descent methods, respectively. Furthermore, we establish descent lemmas for the $\mathcal{H}(\phi)$-convex and $\mathcal{H}(\Phi)$-smooth objective functions when utilizing these four methods. On the basis of these findings, we develop the gradient structure control algorithm to address non-convex optimization objectives, encompassing both the functions represented by machine learning models and common loss functions in deep learning. The effectiveness of the proposed methodology is empirically validated through experiments.<|reference_end|> | arxiv | @article{qi2024extended,
title={Extended convexity and smoothness and their applications in deep
learning},
author={Binchuan Qi},
journal={arXiv preprint arXiv:2410.05807},
year={2024},
archivePrefix={arXiv},
eprint={2410.05807},
primaryClass={cs.LG cs.DS math.OC}
} | qi2024extended |
arxiv-666966 | 2410.05808 | Vision Transformer based Random Walk for Group Re-Identification | <|reference_start|>Vision Transformer based Random Walk for Group Re-Identification: Group re-identification (re-ID) aims to match groups with the same people under different cameras, mainly involves the challenges of group members and layout changes well. Most existing methods usually use the k-nearest neighbor algorithm to update node features to consider changes in group membership, but these methods cannot solve the problem of group layout changes. To this end, we propose a novel vision transformer based random walk framework for group re-ID. Specifically, we design a vision transformer based on a monocular depth estimation algorithm to construct a graph through the average depth value of pedestrian features to fully consider the impact of camera distance on group members relationships. In addition, we propose a random walk module to reconstruct the graph by calculating affinity scores between target and gallery images to remove pedestrians who do not belong to the current group. Experimental results show that our framework is superior to most methods.<|reference_end|> | arxiv | @article{zhang2024vision,
title={Vision Transformer based Random Walk for Group Re-Identification},
author={Guoqing Zhang, Tianqi Liu, Wenxuan Fang and Yuhui Zheng},
journal={arXiv preprint arXiv:2410.05808},
year={2024},
archivePrefix={arXiv},
eprint={2410.05808},
primaryClass={cs.CV}
} | zhang2024vision |
arxiv-666967 | 2410.05810 | Uncertainty-Aware Fairness-Adaptive Classification Trees | <|reference_start|>Uncertainty-Aware Fairness-Adaptive Classification Trees: In an era where artificial intelligence and machine learning algorithms increasingly impact human life, it is crucial to develop models that account for potential discrimination in their predictions. This paper tackles this problem by introducing a new classification tree algorithm using a novel splitting criterion that incorporates fairness adjustments into the tree-building process. The proposed method integrates a fairness-aware impurity measure that balances predictive accuracy with fairness across protected groups. By ensuring that each splitting node considers both the gain in classification error and the fairness, our algorithm encourages splits that mitigate discrimination. Importantly, in penalizing unfair splits, we account for the uncertainty in the fairness metric by utilizing its confidence interval instead of relying on its point estimate. Experimental results on benchmark and synthetic datasets illustrate that our method effectively reduces discriminatory predictions compared to traditional classification trees, without significant loss in overall accuracy.<|reference_end|> | arxiv | @article{gottard2024uncertainty-aware,
title={Uncertainty-Aware Fairness-Adaptive Classification Trees},
author={Anna Gottard and Vanessa Verrina and Sabrina Giordano},
journal={arXiv preprint arXiv:2410.05810},
year={2024},
archivePrefix={arXiv},
eprint={2410.05810},
primaryClass={stat.ML cs.LG}
} | gottard2024uncertainty-aware |
arxiv-666968 | 2410.05811 | lintsampler: Easy random sampling via linear interpolation | <|reference_start|>lintsampler: Easy random sampling via linear interpolation: 'lintsampler' provides a Python implementation of a technique we term 'linear interpolant sampling': an algorithm to efficiently draw pseudo-random samples from an arbitrary probability density function (PDF). First, the PDF is evaluated on a grid-like structure. Then, it is assumed that the PDF can be approximated between grid vertices by the (multidimensional) linear interpolant. With this assumption, random samples can be efficiently drawn via inverse transform sampling. lintsampler is primarily written with 'numpy', drawing some additional functionality from 'scipy'. Under the most basic usage of lintsampler, the user provides a Python function defining the target PDF and some parameters describing a grid-like structure to the 'LintSampler' class, and is then able to draw samples via the 'sample' method. Additionally, there is functionality for the user to set the random seed, employ quasi-Monte Carlo sampling, or sample within a premade grid ('DensityGrid') or tree ('DensityTree') structure.<|reference_end|> | arxiv | @article{naik2024lintsampler:,
title={lintsampler: Easy random sampling via linear interpolation},
author={Aneesh P. Naik and Michael S. Petersen},
journal={Journal of Open Source Software, 2024, 9(102), 6906},
year={2024},
doi={10.21105/joss.06906},
archivePrefix={arXiv},
eprint={2410.05811},
primaryClass={stat.CO astro-ph.IM cs.MS math.PR}
} | naik2024lintsampler: |
arxiv-666969 | 2410.05813 | Single Actuator Undulation Soft-bodied Robots Using A Precompressed Variable Thickness Flexible Beam | <|reference_start|>Single Actuator Undulation Soft-bodied Robots Using A Precompressed Variable Thickness Flexible Beam: Soft robots - due to their intrinsic flexibility of the body - can adaptively navigate unstructured environments. One of the most popular locomotion gaits that has been implemented in soft robots is undulation. The undulation motion in soft robots resembles the locomotion gait of stringy creatures such as snakes, eels, and C. Elegans. Typically, the implementation of undulation locomotion on a soft robot requires many actuators to control each segment of the stringy body. The added weight of multiple actuators limits the navigating performance of soft-bodied robots. In this paper, we propose a simple tendon-driven flexible beam with only one actuator (a DC motor) that can generate a mechanical traveling wave along the beam to support the undulation locomotion of soft robots. The beam will be precompressed along its axis by shortening the length of the two tendons to form an S-shape, thus pretensioning the tendons. The motor will wind and unwind the tendons to deform the flexible beam and generate traveling waves along the body of the robot. We experiment with different pre-tension to characterize the relationship between tendon pre-tension forces and the DC-motor winding/unwinding. Our proposal enables a simple implementation of undulation motion to support the locomotion of soft-bodied robots.<|reference_end|> | arxiv | @article{ta2024single,
title={Single Actuator Undulation Soft-bodied Robots Using A Precompressed
Variable Thickness Flexible Beam},
author={Tung D. Ta},
journal={arXiv preprint arXiv:2410.05813},
year={2024},
archivePrefix={arXiv},
eprint={2410.05813},
primaryClass={cs.RO}
} | ta2024single |
arxiv-666970 | 2410.05814 | CALoR: Towards Comprehensive Model Inversion Defense | <|reference_start|>CALoR: Towards Comprehensive Model Inversion Defense: Model Inversion Attacks (MIAs) aim at recovering privacy-sensitive training data from the knowledge encoded in the released machine learning models. Recent advances in the MIA field have significantly enhanced the attack performance under multiple scenarios, posing serious privacy risks of Deep Neural Networks (DNNs). However, the development of defense strategies against MIAs is relatively backward to resist the latest MIAs and existing defenses fail to achieve further trade-off between model utility and model robustness. In this paper, we provide an in-depth analysis from the perspective of intrinsic vulnerabilities of MIAs, comprehensively uncovering the weaknesses inherent in the basic pipeline, which are partially investigated in the previous defenses. Building upon these new insights, we propose a robust defense mechanism, integrating Confidence Adaptation and Low-Rank compression(CALoR). Our method includes a novel robustness-enhanced classification loss specially-designed for model inversion defenses and reveals the extraordinary effectiveness of compressing the classification header. With CALoR, we can mislead the optimization objective, reduce the leaked information and impede the backpropagation of MIAs, thus mitigating the risk of privacy leakage. Extensive experimental results demonstrate that our method achieves state-of-the-art (SOTA) defense performance against MIAs and exhibits superior generalization to existing defenses across various scenarios.<|reference_end|> | arxiv | @article{yu2024calor:,
title={{CALoR}: Towards Comprehensive Model Inversion Defense},
author={Hongyao Yu and Yixiang Qiu and Hao Fang and Bin Chen and Sijin Yu and Bin Wang and Shu-Tao Xia and Ke Xu},
journal={arXiv preprint arXiv:2410.05814},
year={2024},
archivePrefix={arXiv},
eprint={2410.05814},
primaryClass={cs.CR cs.CV cs.LG}
} | yu2024calor: |
arxiv-666971 | 2410.05817 | Probing Language Models on Their Knowledge Source | <|reference_start|>Probing Language Models on Their Knowledge Source: Large Language Models (LLMs) often encounter conflicts between their learned, internal (parametric knowledge, PK) and external knowledge provided during inference (contextual knowledge, CK). Understanding how LLMs models prioritize one knowledge source over the other remains a challenge. In this paper, we propose a novel probing framework to explore the mechanisms governing the selection between PK and CK in LLMs. Using controlled prompts designed to contradict the model's PK, we demonstrate that specific model activations are indicative of the knowledge source employed. We evaluate this framework on various LLMs of different sizes and demonstrate that mid-layer activations, particularly those related to relations in the input, are crucial in predicting knowledge source selection, paving the way for more reliable models capable of handling knowledge conflicts effectively.<|reference_end|> | arxiv | @article{tighidet2024probing,
title={Probing Language Models on Their Knowledge Source},
author={Zineddine Tighidet and Andrea Mogini and Jiali Mei and Benjamin Piwowarski and Patrick Gallinari},
journal={arXiv preprint arXiv:2410.05817},
year={2024},
archivePrefix={arXiv},
eprint={2410.05817},
primaryClass={cs.CL}
} | tighidet2024probing |
arxiv-666972 | 2410.05819 | CAP: Detecting Unauthorized Data Usage in Generative Models via Prompt Generation | <|reference_start|>CAP: Detecting Unauthorized Data Usage in Generative Models via Prompt Generation: To achieve accurate and unbiased predictions, Machine Learning (ML) models rely on large, heterogeneous, and high-quality datasets. However, this could raise ethical and legal concerns regarding copyright and authorization aspects, especially when information is gathered from the Internet. With the rise of generative models, being able to track data has become of particular importance, especially since they may (un)intentionally replicate copyrighted contents. Therefore, this work proposes Copyright Audit via Prompts generation (CAP), a framework for automatically testing whether an ML model has been trained with unauthorized data. Specifically, we devise an approach to generate suitable keys inducing the model to reveal copyrighted contents. To prove its effectiveness, we conducted an extensive evaluation campaign on measurements collected in four IoT scenarios. The obtained results showcase the effectiveness of CAP, when used against both realistic and synthetic datasets.<|reference_end|> | arxiv | @article{gallo2024cap:,
title={{CAP}: Detecting Unauthorized Data Usage in Generative Models via Prompt Generation},
author={Daniela Gallo and Angelica Liguori and Ettore Ritacco and Luca Caviglione and Fabrizio Durante and Giuseppe Manco},
journal={arXiv preprint arXiv:2410.05819},
year={2024},
archivePrefix={arXiv},
eprint={2410.05819},
primaryClass={cs.LG}
} | gallo2024cap: |
arxiv-666973 | 2410.05820 | IncSAR: A Dual Fusion Incremental Learning Framework for SAR Target Recognition | <|reference_start|>IncSAR: A Dual Fusion Incremental Learning Framework for SAR Target Recognition: Deep learning techniques have been successfully applied in Synthetic Aperture Radar (SAR) target recognition in static scenarios relying on predefined datasets. However, in real-world scenarios, models must incrementally learn new information without forgetting previously learned knowledge. Models' tendency to forget old knowledge when learning new tasks, known as catastrophic forgetting, remains an open challenge. In this paper, an incremental learning framework, called IncSAR, is proposed to mitigate catastrophic forgetting in SAR target recognition. IncSAR comprises a Vision Transformer (ViT) and a custom-designed Convolutional Neural Network (CNN) in individual branches combined through a late-fusion strategy. A denoising module, utilizing the properties of Robust Principal Component Analysis (RPCA), is introduced to alleviate the speckle noise present in SAR images. Moreover, a random projection layer is employed to enhance the linear separability of features, and a Linear Discriminant Analysis (LDA) approach is proposed to decorrelate the extracted class prototypes. Experimental results on the MSTAR and OpenSARShip benchmark datasets demonstrate that IncSAR outperforms state-of-the-art approaches, leading to an improvement from $98.05\%$ to $99.63\%$ in average accuracy and from $3.05\%$ to $0.33\%$ in performance dropping rate.<|reference_end|> | arxiv | @article{karantaidis2024incsar:,
title={{IncSAR}: A Dual Fusion Incremental Learning Framework for {SAR} Target Recognition},
author={George Karantaidis and Athanasios Pantsios and Yiannis Kompatsiaris and Symeon Papadopoulos},
journal={arXiv preprint arXiv:2410.05820},
year={2024},
archivePrefix={arXiv},
eprint={2410.05820},
primaryClass={cs.CV}
} | karantaidis2024incsar: |
arxiv-666974 | 2410.05821 | A Zero-Shot approach to the Conversational Tree Search Task | <|reference_start|>A Zero-Shot approach to the Conversational Tree Search Task: In sensitive domains, such as legal or medial domains, the correctness of information given to users is critical. To address this, the recently introduced task Conversational Tree Search (CTS) provides a graph-based framework for controllable task-oriented dialog in sensitive domains. However, a big drawback of state-of-the-art CTS agents is their long training time, which is especially problematic as a new agent must be trained every time the associated domain graph is updated. The goal of this paper is to eliminate the need for training CTS agents altogether. To achieve this, we implement a novel LLM-based method for zero-shot, controllable CTS agents. We show that these agents significantly outperform state-of-the-art CTS agents (p<0.0001; Barnard Exact test) in simulation. This generalizes to all available CTS domains. Finally, we perform user evaluation to test the agent performance in the wild, showing that our policy significantly (p<0.05; Barnard Exact) improves task-success compared to the state-of-the-art Reinforcement Learning-based CTS agent.<|reference_end|> | arxiv | @article{väth2024a,
title={A Zero-Shot approach to the {Conversational Tree Search} Task},
author={Dirk V{\"a}th and Ngoc Thang Vu},
journal={arXiv preprint arXiv:2410.05821},
year={2024},
archivePrefix={arXiv},
eprint={2410.05821},
primaryClass={cs.CL}
} | väth2024a |
arxiv-666975 | 2410.05824 | Multi-Session Client-Centered Treatment Outcome Evaluation in Psychotherapy | <|reference_start|>Multi-Session Client-Centered Treatment Outcome Evaluation in Psychotherapy: In psychotherapy, therapeutic outcome assessment, or treatment outcome evaluation, is essential for enhancing mental health care by systematically evaluating therapeutic processes and outcomes. Existing large language model approaches often focus on therapist-centered, single-session evaluations, neglecting the client's subjective experience and longitudinal progress across multiple sessions. To address these limitations, we propose IPAEval, a client-Informed Psychological Assessment-based Evaluation framework that automates treatment outcome evaluations from the client's perspective using clinical interviews. IPAEval integrates cross-session client-contextual assessment and session-focused client-dynamics assessment to provide a comprehensive understanding of therapeutic progress. Experiments on our newly developed TheraPhase dataset demonstrate that IPAEval effectively tracks symptom severity and treatment outcomes over multiple sessions, outperforming previous single-session models and validating the benefits of items-aware reasoning mechanisms.<|reference_end|> | arxiv | @article{na2024multi-session,
title={Multi-Session Client-Centered Treatment Outcome Evaluation in Psychotherapy},
author={Hongbin Na and Tao Shen and Shumao Yu and Ling Chen},
journal={arXiv preprint arXiv:2410.05824},
year={2024},
archivePrefix={arXiv},
eprint={2410.05824},
primaryClass={cs.CL}
} | na2024multi-session |
arxiv-666976 | 2410.05827 | Towards an Operational Responsible AI Framework for Learning Analytics in Higher Education | <|reference_start|>Towards an Operational Responsible AI Framework for Learning Analytics in Higher Education: Universities are increasingly adopting data-driven strategies to enhance student success, with AI applications like Learning Analytics (LA) and Predictive Learning Analytics (PLA) playing a key role in identifying at-risk students, personalising learning, supporting teachers, and guiding educational decision-making. However, concerns are rising about potential harms these systems may pose, such as algorithmic biases leading to unequal support for minority students. While many have explored the need for Responsible AI in LA, existing works often lack practical guidance for how institutions can operationalise these principles. In this paper, we propose a novel Responsible AI framework tailored specifically to LA in Higher Education (HE). We started by mapping 11 established Responsible AI frameworks, including those by leading tech companies, to the context of LA in HE. This led to the identification of seven key principles such as transparency, fairness, and accountability. We then conducted a systematic review of the literature to understand how these principles have been applied in practice. Drawing from these findings, we present a novel framework that offers practical guidance to HE institutions and is designed to evolve with community input, ensuring its relevance as LA systems continue to develop.<|reference_end|> | arxiv | @article{tirado2024towards,
title={Towards an Operational Responsible {AI} Framework for Learning Analytics in Higher Education},
author={Morales Tirado, Alba and Mulholland, Paul and Fernandez, Miriam},
journal={arXiv preprint arXiv:2410.05827},
year={2024},
archivePrefix={arXiv},
eprint={2410.05827},
primaryClass={cs.CY cs.AI}
} | tirado2024towards |
arxiv-666977 | 2410.05828 | Effort Allocation for Deadline-Aware Task and Motion Planning: A Metareasoning Approach | <|reference_start|>Effort Allocation for Deadline-Aware Task and Motion Planning: A Metareasoning Approach: In robot planning, tasks can often be achieved through multiple options, each consisting of several actions. This work specifically addresses deadline constraints in task and motion planning, aiming to find a plan that can be executed within the deadline despite uncertain planning and execution times. We propose an effort allocation problem, formulated as a Markov decision process (MDP), to find such a plan by leveraging metareasoning perspectives to allocate computational resources among the given options. We formally prove the NP-hardness of the problem by reducing it from the knapsack problem. Both a model-based approach, where transition models are learned from past experience, and a model-free approach, which overcomes the unavailability of prior data acquisition through reinforcement learning, are explored. For the model-based approach, we investigate Monte Carlo tree search (MCTS) to approximately solve the proposed MDP and further design heuristic schemes to tackle NP-hardness, leading to the approximate yet efficient algorithm called DP_Rerun. In experiments, DP_Rerun demonstrates promising performance comparable to MCTS while requiring negligible computation time.<|reference_end|> | arxiv | @article{sung2024effort,
title={Effort Allocation for Deadline-Aware Task and Motion Planning: A Metareasoning Approach},
author={Yoonchang Sung and Shahaf S. Shperberg and Qi Wang and Peter Stone},
journal={arXiv preprint arXiv:2410.05828},
year={2024},
archivePrefix={arXiv},
eprint={2410.05828},
primaryClass={cs.RO}
} | sung2024effort |
arxiv-666978 | 2410.05829 | A GPT-based Decision Transformer for Multi-Vehicle Coordination at Unsignalized Intersections | <|reference_start|>A GPT-based Decision Transformer for Multi-Vehicle Coordination at Unsignalized Intersections: In this paper, we explore the application of the Decision Transformer, a decision-making algorithm based on the Generative Pre-trained Transformer (GPT) architecture, to multi-vehicle coordination at unsignalized intersections. We formulate the coordination problem so as to find the optimal trajectories for multiple vehicles at intersections, modeling it as a sequence prediction task to fully leverage the power of GPTs as a sequence model. Through extensive experiments, we compare our approach to a reservation-based intersection management system. Our results show that the Decision Transformer can outperform the training data in terms of total travel time and can be generalized effectively to various scenarios, including noise-induced velocity variations, continuous interaction environments, and different vehicle numbers and road configurations.<|reference_end|> | arxiv | @article{lee2024a,
title={A {GPT}-based Decision Transformer for Multi-Vehicle Coordination at Unsignalized Intersections},
author={Eunjae Lee and Minhee Kang and Yoojin Choi and Heejin Ahn},
journal={arXiv preprint arXiv:2410.05829},
year={2024},
archivePrefix={arXiv},
eprint={2410.05829},
primaryClass={cs.RO}
} | lee2024a |
arxiv-666979 | 2410.05837 | A noise-corrected Langevin algorithm and sampling by half-denoising | <|reference_start|>A noise-corrected Langevin algorithm and sampling by half-denoising: The Langevin algorithm is a classic method for sampling from a given pdf in a real space. In its basic version, it only requires knowledge of the gradient of the log-density, also called the score function. However, in deep learning, it is often easier to learn the so-called "noisy score function", i.e. the gradient of the log-density of noisy data, more precisely when Gaussian noise is added to the data. Such an estimate is biased and complicates the use of the Langevin method. Here, we propose a noise-corrected version of the Langevin algorithm, where the bias due to noisy data is removed, at least regarding first-order terms. Unlike diffusion models, our algorithm needs to know the noisy score function for one single noise level only. We further propose a simple special case which has an interesting intuitive interpretation of iteratively adding noise the data and then attempting to remove half of that noise.<|reference_end|> | arxiv | @article{hyvärinen2024a,
title={A noise-corrected {Langevin} algorithm and sampling by half-denoising},
author={Aapo Hyv{\"a}rinen},
journal={arXiv preprint arXiv:2410.05837},
year={2024},
archivePrefix={arXiv},
eprint={2410.05837},
primaryClass={cs.LG stat.ML}
} | hyvärinen2024a |
arxiv-666980 | 2410.05838 | Time Transfer: On Optimal Learning Rate and Batch Size In The Infinite Data Limit | <|reference_start|>Time Transfer: On Optimal Learning Rate and Batch Size In The Infinite Data Limit: One of the main challenges in optimal scaling of large language models (LLMs) is the prohibitive cost of hyperparameter tuning, particularly learning rate $\eta$ and batch size $B$. While techniques like $\mu$P (Yang et al., 2022) provide scaling rules for optimal $\eta$ transfer in the infinite model size limit, the optimal scaling behavior in the infinite data size limit ($T \to \infty$) remains unknown. We fill in this gap by observing for the first time an interplay of three optimal $\eta$ scaling regimes: $\eta \propto \sqrt{T}$, $\eta \propto 1$, and $\eta \propto 1/\sqrt{T}$ with transitions controlled by $B$ and its relation to the time-evolving critical batch size $B_\mathrm{crit} \propto T$. Furthermore, we show that the optimal batch size is positively correlated with $B_\mathrm{crit}$: keeping it fixed becomes suboptimal over time even if learning rate is scaled optimally. Surprisingly, our results demonstrate that the observed optimal $\eta$ and $B$ dynamics are preserved with $\mu$P model scaling, challenging the conventional view of $B_\mathrm{crit}$ dependence solely on loss value. Complementing optimality, we examine the sensitivity of loss to changes in learning rate, where we find the sensitivity to decrease with $T \to \infty$ and to remain constant with $\mu$P model scaling. We hope our results make the first step towards a unified picture of the joint optimal data and model scaling.<|reference_end|> | arxiv | @article{filatov2024time,
title={Time Transfer: On Optimal Learning Rate and Batch Size In The Infinite Data Limit},
author={Oleg Filatov and Jan Ebert and Jiangtao Wang and Stefan Kesselheim},
journal={arXiv preprint arXiv:2410.05838},
year={2024},
archivePrefix={arXiv},
eprint={2410.05838},
primaryClass={cs.LG cs.AI}
} | filatov2024time |
arxiv-666981 | 2410.05839 | Bottom-up Anytime Discovery of Generalised Multimodal Graph Patterns for Knowledge Graphs | <|reference_start|>Bottom-up Anytime Discovery of Generalised Multimodal Graph Patterns for Knowledge Graphs: Vast amounts of heterogeneous knowledge are becoming publicly available in the form of knowledge graphs, often linking multiple sources of data that have never been together before, and thereby enabling scholars to answer many new research questions. It is often not known beforehand, however, which questions the data might have the answers to, potentially leaving many interesting and novel insights to remain undiscovered. To support scholars during this scientific workflow, we introduce an anytime algorithm for the bottom-up discovery of generalized multimodal graph patterns in knowledge graphs. Each pattern is a conjunction of binary statements with (data-) type variables, constants, and/or value patterns. Upon discovery, the patterns are converted to SPARQL queries and presented in an interactive facet browser together with metadata and provenance information, enabling scholars to explore, analyse, and share queries. We evaluate our method from a user perspective, with the help of domain experts in the humanities.<|reference_end|> | arxiv | @article{wilcke2024bottom-up,
title={Bottom-up Anytime Discovery of Generalised Multimodal Graph Patterns for Knowledge Graphs},
author={Xander Wilcke and Rick Mourits and Auke Rijpma and Richard Zijdeman},
journal={arXiv preprint arXiv:2410.05839},
year={2024},
archivePrefix={arXiv},
eprint={2410.05839},
primaryClass={cs.AI cs.DB}
} | wilcke2024bottom-up |
arxiv-666982 | 2410.05842 | Privacy-aware Fully Model-Free Event-triggered Cloud-based HVAC Control | <|reference_start|>Privacy-aware Fully Model-Free Event-triggered Cloud-based HVAC Control: Privacy is a major concern when computing-as-a-service (CaaS) platforms, e.g., cloud-computing platforms, are utilized for building automation, as CaaS platforms can infer sensitive information, such as occupancy, using the sensor measurements of a building. Although the existing encrypted model-based control algorithms can ensure the security and privacy of sensor measurements, they are highly complex to implement and require high computational resources, which result in a high cost of using CaaS platforms. To address these issues, in this paper, we propose an encrypted fully model-free event-triggered cloud-based HVAC control framework that ensures the privacy of occupancy information and minimizes the communication and computation overhead associated with encrypted HVAC control. To this end, we first develop a model-free controller for regulating indoor temperature and CO2 levels. We then design a model-free event-triggering unit which reduces the communication and computation costs of encrypted HVAC control using an optimal triggering policy. Finally, we evaluate the performance of the proposed encrypted fully model-free event-triggered cloud-based HVAC control framework using the TRNSYS simulator, comparing it to an encrypted model-based event-triggered control framework, which uses model predictive control to regulate the indoor climate. Our numerical results demonstrate that, compared to the encrypted model-based method, the proposed fully model-free framework improves the control performance while reducing the communication and computation costs. 
More specifically, it reduces the communication between the system and the CaaS platform by 64% amount, and its computation time is 75% less than that of the model-based control.<|reference_end|> | arxiv | @article{feng2024privacy-aware,
title={Privacy-aware Fully Model-Free Event-triggered Cloud-based {HVAC} Control},
author={Zhenan Feng and Ehsan Nekouei},
journal={arXiv preprint arXiv:2410.05842},
year={2024},
archivePrefix={arXiv},
eprint={2410.05842},
primaryClass={eess.SY cs.SY}
} | feng2024privacy-aware |
arxiv-666983 | 2410.05844 | Spectrally Efficient LDPC Codes For IRIG-106 Waveforms via Random Puncturing | <|reference_start|>Spectrally Efficient LDPC Codes For IRIG-106 Waveforms via Random Puncturing: Low-density parity-check (LDPC) codes form part of the IRIG-106 standard and have been successfully deployed for the Telemetry Group version of shaped-offset quadrature phase shift keying (SOQPSK-TG) modulation. Recently, LDPC code solutions have been proposed and optimized for continuous phase modulations (CPMs), including the pulse code modulation/frequency modulation (PCM/FM) and the multi-h CPM developed by the Advanced Range TeleMetry program (ARTM CPM). These codes were shown to perform around one dB from the respective channel capacities of these modulations. In this paper, we consider the effect of random puncturing of these LDPC codes to further improve spectrum efficiency. We present numerical simulation results that affirm the robust decoding performance promised by LDPC codes designed for ARTM CPM.<|reference_end|> | arxiv | @article{cummins2024spectrally,
title={Spectrally Efficient {LDPC} Codes For {IRIG-106} Waveforms via Random Puncturing},
author={Andrew D. Cummins and David G. M. Mitchell and Erik Perrins},
journal={arXiv preprint arXiv:2410.05844},
year={2024},
archivePrefix={arXiv},
eprint={2410.05844},
primaryClass={eess.SY cs.IT cs.SY eess.SP math.IT}
} | cummins2024spectrally |
arxiv-666984 | 2410.05849 | ModalPrompt:Dual-Modality Guided Prompt for Continual Learning of Large Multimodal Models | <|reference_start|>ModalPrompt:Dual-Modality Guided Prompt for Continual Learning of Large Multimodal Models: Large Multimodal Models (LMMs) exhibit remarkable multi-tasking ability by learning mixed datasets jointly. However, novel tasks would be encountered sequentially in dynamic world, and continually fine-tuning LMMs often leads to performance degrades. To handle the challenges of catastrophic forgetting, existing methods leverage data replay or model expansion, both of which are not specially developed for LMMs and have their inherent limitations. In this paper, we propose a novel dual-modality guided prompt learning framework (ModalPrompt) tailored for multimodal continual learning to effectively learn new tasks while alleviating forgetting of previous knowledge. Concretely, we learn prototype prompts for each task and exploit efficient prompt selection for task identifiers and prompt fusion for knowledge transfer based on image-text supervision. Extensive experiments demonstrate the superiority of our approach, e.g., ModalPrompt achieves +20% performance gain on LMMs continual learning benchmarks with $\times$ 1.42 inference speed refraining from growing training cost in proportion to the number of tasks. The code will be made publically available.<|reference_end|> | arxiv | @article{zeng2024modalprompt:dual-modality,
title={{ModalPrompt}:Dual-Modality Guided Prompt for Continual Learning of Large Multimodal Models},
author={Fanhu Zeng and Fei Zhu and Haiyang Guo and Xu-Yao Zhang and Cheng-Lin Liu},
journal={arXiv preprint arXiv:2410.05849},
year={2024},
archivePrefix={arXiv},
eprint={2410.05849},
primaryClass={cs.CV}
} | zeng2024modalprompt:dual-modality |
arxiv-666985 | 2410.05851 | Communicating with Speakers and Listeners of Different Pragmatic Levels | <|reference_start|>Communicating with Speakers and Listeners of Different Pragmatic Levels: This paper explores the impact of variable pragmatic competence on communicative success through simulating language learning and conversing between speakers and listeners with different levels of reasoning abilities. Through studying this interaction, we hypothesize that matching levels of reasoning between communication partners would create a more beneficial environment for communicative success and language learning. Our research findings indicate that learning from more explicit, literal language is advantageous, irrespective of the learner's level of pragmatic competence. Furthermore, we find that integrating pragmatic reasoning during language learning, not just during evaluation, significantly enhances overall communication performance. This paper provides key insights into the importance of aligning reasoning levels and incorporating pragmatic reasoning in optimizing communicative interactions.<|reference_end|> | arxiv | @article{naszadi2024communicating,
title={Communicating with Speakers and Listeners of Different Pragmatic Levels},
author={Kata Naszadi and Frans A. Oliehoek and Christof Monz},
journal={arXiv preprint arXiv:2410.05851},
year={2024},
archivePrefix={arXiv},
eprint={2410.05851},
primaryClass={cs.CL cs.AI}
} | naszadi2024communicating |
arxiv-666986 | 2410.05852 | A$^3$L-FEC: Age-Aware Application Layer Forward Error Correction Flow Control | <|reference_start|>A$^3$L-FEC: Age-Aware Application Layer Forward Error Correction Flow Control: Age of Information (AoI) is a metric and KPI that has been developed for measuring and controlling data freshness. Optimization of AoI in a real-life network requires adapting the rate and timing of transmissions to varying network conditions. The vast majority of previous research on the control of AoI has been theoretical, using idealized models which ignored certain implementation aspects. As such, there is still a gap between the research on AoI and real-world protocols. In this paper we present an effort toward closing this gap by introducing an age-aware flow control algorithm. The algorithm, Age-Aware Application Layer Forward Error Correction (A$^3$L-FEC), is a packet generation mechanism operating on top of the user datagram protocol (UDP). The purpose is to control peak Age of the end-to-end packet flow, specifically, to reduce the rate of what we call "Age Violations", that is, the events where the peak age exceeds a given threshold. Evaluations in Mininet-WiFi and MATLAB indicate that A$^3$L-FEC reduces age violations compared to two related protocols in the literature, namely TCP-BBR and ACP+.<|reference_end|> | arxiv | @article{baghaee2024a$^3$l-fec:,
title={{A$^3$L-FEC}: Age-Aware Application Layer Forward Error Correction Flow Control},
author={Sajjad Baghaee and Elif Uysal},
journal={arXiv preprint arXiv:2410.05852},
year={2024},
archivePrefix={arXiv},
eprint={2410.05852},
primaryClass={cs.NI}
} | baghaee2024a$^3$l-fec: |
arxiv-666987 | 2410.05854 | A Scalable State Sharing Protocol for Low-Resource Validator Nodes in Blockchain Networks | <|reference_start|>A Scalable State Sharing Protocol for Low-Resource Validator Nodes in Blockchain Networks: The perpetual growth of data stored on popular blockchains such as Ethereum leads to significant scalability challenges and substantial storage costs for operators of full nodes. Increasing costs may lead to fewer independently operated nodes in the network, which poses risks to decentralization (and hence network security), but also pushes decentralized app developers towards centrally hosted API services. This paper introduces a new protocol that allows validator nodes to participate in a blockchain network without the need to store the full state of the network on each node. The key idea is to use the blockchain network as both a replicated state machine and as a distributed storage system. By distributing states across nodes and enabling efficient data retrieval through a Kademlia-inspired routing protocol, we reduce storage costs for validators. Cryptographic proofs (such as Merkle proofs) are used to allow nodes to verify data stored by other nodes without having to trust those nodes directly. While the protocol trades off data storage for increased network bandwidth, we show how gossiping and caching can minimize the increased bandwidth needs. To validate our state sharing protocol, we conduct an extensive quantitative analysis of Ethereum's data storage and data access patterns. Our findings indicate that while our protocol significantly lowers storage needs, it comes with an increased bandwidth usage ranging from 1.5 MB to 5 MB per block, translating to an additional monthly bandwidth of 319 GB to 1,065 GB. Despite this, the size remains small enough such that it can be passed to all nodes and validated within Ethereum's 12-second block validation window. 
Further analysis shows that Merkle proofs are the most significant contributor to the additional bandwidth. To address this concern, we also analyze the impact of switching to the more space-efficient Verkle Proofs.<|reference_end|> | arxiv | @article{hias2024a,
title={A Scalable State Sharing Protocol for Low-Resource Validator Nodes in Blockchain Networks},
author={Hias, Ruben and Wang, Weihong and Vanhoof, Jan and Van Cutsem, Tom},
journal={arXiv preprint arXiv:2410.05854},
year={2024},
archivePrefix={arXiv},
eprint={2410.05854},
primaryClass={cs.DC}
} | hias2024a |
arxiv-666988 | 2410.05856 | Stochastic Bandits for Egalitarian Assignment | <|reference_start|>Stochastic Bandits for Egalitarian Assignment: We study EgalMAB, an egalitarian assignment problem in the context of stochastic multi-armed bandits. In EgalMAB, an agent is tasked with assigning a set of users to arms. At each time step, the agent must assign exactly one arm to each user such that no two users are assigned to the same arm. Subsequently, each user obtains a reward drawn from the unknown reward distribution associated with its assigned arm. The agent's objective is to maximize the minimum expected cumulative reward among all users over a fixed horizon. This problem has applications in areas such as fairness in job and resource allocations, among others. We design and analyze a UCB-based policy EgalUCB and establish upper bounds on the cumulative regret. In complement, we establish an almost-matching policy-independent impossibility result.<|reference_end|> | arxiv | @article{lim2024stochastic,
title={Stochastic Bandits for Egalitarian Assignment},
author={Eugene Lim and Vincent Y. F. Tan and Harold Soh},
journal={arXiv preprint arXiv:2410.05856},
year={2024},
archivePrefix={arXiv},
eprint={2410.05856},
primaryClass={stat.ML cs.LG}
} | lim2024stochastic |
arxiv-666989 | 2410.05860 | MelissaDL x Breed: Towards Data-Efficient On-line Supervised Training of Multi-parametric Surrogates with Active Learning | <|reference_start|>MelissaDL x Breed: Towards Data-Efficient On-line Supervised Training of Multi-parametric Surrogates with Active Learning: Artificial intelligence is transforming scientific computing with deep neural network surrogates that approximate solutions to partial differential equations (PDEs). Traditional off-line training methods face issues with storage and I/O efficiency, as the training dataset has to be computed with numerical solvers up-front. Our previous work, the Melissa framework, addresses these problems by enabling data to be created "on-the-fly" and streamed directly into the training process. In this paper we introduce a new active learning method to enhance data-efficiency for on-line surrogate training. The surrogate is direct and multi-parametric, i.e., it is trained to predict a given timestep directly with different initial and boundary conditions parameters. Our approach uses Adaptive Multiple Importance Sampling guided by training loss statistics, in order to focus NN training on the difficult areas of the parameter space. Preliminary results for 2D heat PDE demonstrate the potential of this method, called Breed, to improve the generalization capabilities of surrogates while reducing computational overhead.<|reference_end|> | arxiv | @article{dymchenko2024melissadl,
title={MelissaDL x Breed: Towards Data-Efficient On-line Supervised Training of
Multi-parametric Surrogates with Active Learning},
author={Sofya Dymchenko (DATAMOVE), Abhishek Purandare (DATAMOVE), Bruno
Raffin (DATAMOVE)},
journal={SC Workshop AI4S, Nov 2024, Atlanta (Georgia), United States},
year={2024},
archivePrefix={arXiv},
eprint={2410.05860},
primaryClass={cs.LG cs.AI}
} | dymchenko2024melissadl |
arxiv-666990 | 2410.05863 | Enhancing Playback Performance in Video Recommender Systems with an On-Device Gating and Ranking Framework | <|reference_start|>Enhancing Playback Performance in Video Recommender Systems with an On-Device Gating and Ranking Framework: Video recommender systems (RSs) have gained increasing attention in recent years. Existing mainstream RSs focus on optimizing the matching function between users and items. However, we noticed that users frequently encounter playback issues such as slow loading or stuttering while browsing the videos, especially in weak network conditions, which will lead to a subpar browsing experience, and may cause users to leave, even when the video content and recommendations are superior. It is quite a serious issue, yet easily overlooked. To tackle this issue, we propose an on-device Gating and Ranking Framework (GRF) that cooperates with server-side RS. Specifically, we utilize a gate model to identify videos that may have playback issues in real-time, and then we employ a ranking model to select the optimal result from a locally-cached pool to replace the stuttering videos. Our solution has been fully deployed on Kwai, a large-scale short video platform with hundreds of millions of users globally. Moreover, it significantly enhances video playback performance and improves overall user experience and retention rates.<|reference_end|> | arxiv | @article{yang2024enhancing,
title={Enhancing Playback Performance in Video Recommender Systems with an
On-Device Gating and Ranking Framework},
author={Yunfei Yang, Zhenghao Qi, Honghuan Wu, Qi Song, Tieyao Zhang, Hao Li,
Yimin Tu, Kaiqiao Zhan, Ben Wang},
journal={arXiv preprint arXiv:2410.05863},
year={2024},
archivePrefix={arXiv},
eprint={2410.05863},
primaryClass={cs.IR}
} | yang2024enhancing |
arxiv-666991 | 2410.05864 | From Tokens to Words: On the Inner Lexicon of LLMs | <|reference_start|>From Tokens to Words: On the Inner Lexicon of LLMs: Natural language is composed of words, but modern LLMs process sub-words as input. A natural question raised by this discrepancy is whether LLMs encode words internally, and if so how. We present evidence that LLMs engage in an intrinsic detokenization process, where sub-word sequences are combined into coherent word representations. Our experiments show that this process takes place primarily within the early and middle layers of the model. They also show that it is robust to non-morphemic splits, typos and perhaps importantly-to out-of-vocabulary words: when feeding the inner representation of such words to the model as input vectors, it can "understand" them despite never seeing them during training. Our findings suggest that LLMs maintain a latent vocabulary beyond the tokenizer's scope. These insights provide a practical, finetuning-free application for expanding the vocabulary of pre-trained models. By enabling the addition of new vocabulary words, we reduce input length and inference iterations, which reduces both space and model latency, with little to no loss in model accuracy.<|reference_end|> | arxiv | @article{kaplan2024from,
title={From Tokens to Words: On the Inner Lexicon of LLMs},
author={Guy Kaplan, Matanel Oren, Yuval Reif, and Roy Schwartz},
journal={arXiv preprint arXiv:2410.05864},
year={2024},
archivePrefix={arXiv},
eprint={2410.05864},
primaryClass={cs.CL cs.AI}
} | kaplan2024from |
arxiv-666992 | 2410.05865 | A fourth-order, multigrid cut-cell method for solving Poisson's equation in three-dimensional irregular domains | <|reference_start|>A fourth-order, multigrid cut-cell method for solving Poisson's equation in three-dimensional irregular domains: We propose a fourth-order cut-cell method for solving Poisson's equations in three-dimensional irregular domains. Major distinguishing features of our method include (a) applicable to arbitrarily complex geometries, (b) high order discretization, (c) optimal complexity. Feature (a) is achieved by Yin space, which is a mathematical model for three-dimensional continua. Feature (b) is accomplished by poised lattice generation (PLG) algorithm, which finds stencils near the irregular boundary for polynomial fitting. Besides, for feature (c), we design a modified multigrid solver whose complexity is theoretically optimal by applying nested dissection (ND) ordering method.<|reference_end|> | arxiv | @article{qian2024a,
title={A fourth-order, multigrid cut-cell method for solving Poisson's equation
in three-dimensional irregular domains},
author={Yixiao Qian, Weizhen Li, Yan Tan, Qinghai Zhang},
journal={arXiv preprint arXiv:2410.05865},
year={2024},
archivePrefix={arXiv},
eprint={2410.05865},
primaryClass={math.NA cs.NA}
} | qian2024a |
arxiv-666993 | 2410.05869 | Unobserved Object Detection using Generative Models | <|reference_start|>Unobserved Object Detection using Generative Models: Can we detect an object that is not visible in an image? This study introduces the novel task of 2D and 3D unobserved object detection for predicting the location of objects that are occluded or lie outside the image frame. We adapt several state-of-the-art pre-trained generative models to solve this task, including 2D and 3D diffusion models and vision--language models, and show that they can be used to infer the presence of objects that are not directly observed. To benchmark this task, we propose a suite of metrics that captures different aspects of performance. Our empirical evaluations on indoor scenes from the RealEstate10k dataset with COCO object categories demonstrate results that motivate the use of generative models for the unobserved object detection task. The current work presents a promising step towards compelling applications like visual search and probabilistic planning that can leverage object detection beyond what can be directly observed.<|reference_end|> | arxiv | @article{bhattacharjee2024unobserved,
title={Unobserved Object Detection using Generative Models},
author={Subhransu S. Bhattacharjee and Dylan Campbell and Rahul Shome},
journal={arXiv preprint arXiv:2410.05869},
year={2024},
archivePrefix={arXiv},
eprint={2410.05869},
primaryClass={cs.CV cs.AI cs.RO}
} | bhattacharjee2024unobserved |
arxiv-666994 | 2410.05870 | Heuristics for Partially Observable Stochastic Contingent Planning | <|reference_start|>Heuristics for Partially Observable Stochastic Contingent Planning: Acting to complete tasks in stochastic partially observable domains is an important problem in artificial intelligence, and is often formulated as a goal-based POMDP. Goal-based POMDPs can be solved using the RTDP-BEL algorithm, that operates by running forward trajectories from the initial belief to the goal. These trajectories can be guided by a heuristic, and more accurate heuristics can result in significantly faster convergence. In this paper, we develop a heuristic function that leverages the structured representation of domain models. We compute, in a relaxed space, a plan to achieve the goal, while taking into account the value of information, as well as the stochastic effects. We provide experiments showing that while our heuristic is slower to compute, it requires an order of magnitude less trajectories before convergence. Overall, it thus speeds up RTDP-BEL, particularly in problems where significant information gathering is needed.<|reference_end|> | arxiv | @article{shani2024heuristics,
title={Heuristics for Partially Observable Stochastic Contingent Planning},
author={Guy Shani},
journal={arXiv preprint arXiv:2410.05870},
year={2024},
archivePrefix={arXiv},
eprint={2410.05870},
primaryClass={cs.AI}
} | shani2024heuristics |
arxiv-666995 | 2410.05871 | A second-order-like optimizer with adaptive gradient scaling for deep learning | <|reference_start|>A second-order-like optimizer with adaptive gradient scaling for deep learning: In this empirical article, we introduce INNAprop, an optimization algorithm that combines the INNA method with the RMSprop adaptive gradient scaling. It leverages second-order information and rescaling while keeping the memory requirements of standard DL methods as AdamW or SGD with momentum.After having recalled our geometrical motivations, we provide quite extensive experiments. On image classification (CIFAR-10, ImageNet) and language modeling (GPT-2), INNAprop consistently matches or outperforms AdamW both in training speed and accuracy, with minimal hyperparameter tuning in large-scale settings. Our code is publicly available at \url{https://github.com/innaprop/innaprop}.<|reference_end|> | arxiv | @article{bolte2024a,
title={A second-order-like optimizer with adaptive gradient scaling for deep
learning},
  author={J\'er\^ome Bolte (TSE-R), Ryan Boustany (TSE-R), Edouard Pauwels
(TSE-R, IRIT-ADRIA), Andrei Purica},
journal={arXiv preprint arXiv:2410.05871},
year={2024},
archivePrefix={arXiv},
eprint={2410.05871},
primaryClass={cs.LG cs.AI math.OC}
} | bolte2024a |
arxiv-666996 | 2410.05873 | MEXA: Multilingual Evaluation of English-Centric LLMs via Cross-Lingual Alignment | <|reference_start|>MEXA: Multilingual Evaluation of English-Centric LLMs via Cross-Lingual Alignment: English-centric large language models (LLMs) often show strong multilingual capabilities. However, the multilingual performance of these models remains unclear and is not thoroughly evaluated for many languages. Most benchmarks for multilinguality focus on classic NLP tasks, or cover a minimal number of languages. We introduce MEXA, a method for assessing the multilingual capabilities of pre-trained English-centric LLMs using parallel sentences, which are available for more languages than existing downstream tasks. MEXA leverages the fact that English-centric LLMs use English as a kind of pivot language in their intermediate layers. It computes the alignment between English and non-English languages using parallel sentences to evaluate the transfer of language understanding from English to other languages. This alignment can be used to estimate model performance in other languages. We conduct studies using various parallel datasets (FLORES-200 and Bible), models (Llama family, Gemma family, Mistral, and OLMo), and established downstream tasks (Belebele, m-MMLU, and m-ARC). We explore different methods to compute embeddings in decoder-only models. Our results show that MEXA, in its default settings, achieves a statistically significant average Pearson correlation of 0.90 with three established downstream tasks across nine models and two parallel datasets. This suggests that MEXA is a reliable method for estimating the multilingual capabilities of English-centric LLMs, providing a clearer understanding of their multilingual potential and the inner workings of LLMs. Leaderboard: https://huggingface.co/spaces/cis-lmu/Mexa, Code: https://github.com/cisnlp/Mexa.<|reference_end|> | arxiv | @article{kargaran2024mexa:,
title={MEXA: Multilingual Evaluation of English-Centric LLMs via Cross-Lingual
Alignment},
author={Amir Hossein Kargaran, Ali Modarressi, Nafiseh Nikeghbal, Jana
  Diesner, Fran\c{c}ois Yvon, Hinrich Sch\"utze},
journal={arXiv preprint arXiv:2410.05873},
year={2024},
archivePrefix={arXiv},
eprint={2410.05873},
primaryClass={cs.CL cs.AI}
} | kargaran2024mexa: |
arxiv-666997 | 2410.05877 | MDAP: A Multi-view Disentangled and Adaptive Preference Learning Framework for Cross-Domain Recommendation | <|reference_start|>MDAP: A Multi-view Disentangled and Adaptive Preference Learning Framework for Cross-Domain Recommendation: Cross-domain Recommendation systems leverage multi-domain user interactions to improve performance, especially in sparse data or new user scenarios. However, CDR faces challenges such as effectively capturing user preferences and avoiding negative transfer. To address these issues, we propose the Multi-view Disentangled and Adaptive Preference Learning (MDAP) framework. Our MDAP framework uses a multiview encoder to capture diverse user preferences. The framework includes a gated decoder that adaptively combines embeddings from different views to generate a comprehensive user representation. By disentangling representations and allowing adaptive feature selection, our model enhances adaptability and effectiveness. Extensive experiments on benchmark datasets demonstrate that our method significantly outperforms state-of-the-art CDR and single-domain models, providing more accurate recommendations and deeper insights into user behavior across different domains.<|reference_end|> | arxiv | @article{tong2024mdap:,
title={MDAP: A Multi-view Disentangled and Adaptive Preference Learning
Framework for Cross-Domain Recommendation},
author={Junxiong Tong, Mingjia Yin, Hao Wang, Qiushi Pan, Defu Lian and Enhong
Chen},
journal={arXiv preprint arXiv:2410.05877},
year={2024},
archivePrefix={arXiv},
eprint={2410.05877},
primaryClass={cs.IR cs.LG}
} | tong2024mdap: |
arxiv-666998 | 2410.05880 | Improved Sample Complexity for Private Nonsmooth Nonconvex Optimization | <|reference_start|>Improved Sample Complexity for Private Nonsmooth Nonconvex Optimization: We study differentially private (DP) optimization algorithms for stochastic and empirical objectives which are neither smooth nor convex, and propose methods that return a Goldstein-stationary point with sample complexity bounds that improve on existing works. We start by providing a single-pass $(\epsilon,\delta)$-DP algorithm that returns an $(\alpha,\beta)$-stationary point as long as the dataset is of size $\widetilde{\Omega}\left(1/\alpha\beta^{3}+d/\epsilon\alpha\beta^{2}+d^{3/4}/\epsilon^{1/2}\alpha\beta^{5/2}\right)$, which is $\Omega(\sqrt{d})$ times smaller than the algorithm of Zhang et al. [2024] for this task, where $d$ is the dimension. We then provide a multi-pass polynomial time algorithm which further improves the sample complexity to $\widetilde{\Omega}\left(d/\beta^2+d^{3/4}/\epsilon\alpha^{1/2}\beta^{3/2}\right)$, by designing a sample efficient ERM algorithm, and proving that Goldstein-stationary points generalize from the empirical loss to the population loss.<|reference_end|> | arxiv | @article{kornowski2024improved,
title={Improved Sample Complexity for Private Nonsmooth Nonconvex Optimization},
author={Guy Kornowski, Daogao Liu, Kunal Talwar},
journal={arXiv preprint arXiv:2410.05880},
year={2024},
archivePrefix={arXiv},
eprint={2410.05880},
primaryClass={cs.LG cs.CR math.OC stat.ML}
} | kornowski2024improved |
arxiv-666999 | 2410.05881 | Edit Distances and Their Applications to Downstream Tasks in Research and Commercial Contexts | <|reference_start|>Edit Distances and Their Applications to Downstream Tasks in Research and Commercial Contexts: The tutorial describes the concept of edit distances applied to research and commercial contexts. We use Translation Edit Rate (TER), Levenshtein, Damerau-Levenshtein, Longest Common Subsequence and $n$-gram distances to demonstrate the frailty of statistical metrics when comparing text sequences. Our discussion disassembles them into their essential components. We discuss the centrality of four editing actions: insert, delete, replace and move words, and show their implementations in openly available packages and toolkits. The application of edit distances in downstream tasks often assumes that these accurately represent work done by post-editors and real errors that need to be corrected in MT output. We discuss how imperfect edit distances are in capturing the details of this error correction work and the implications for researchers and for commercial applications, of these uses of edit distances. In terms of commercial applications, we discuss their integration in computer-assisted translation tools and how the perception of the connection between edit distances and post-editor effort affects the definition of translator rates.<|reference_end|> | arxiv | @article{carmo2024edit,
title={Edit Distances and Their Applications to Downstream Tasks in Research
and Commercial Contexts},
  author={F\'elix do Carmo and Diptesh Kanojia},
journal={arXiv preprint arXiv:2410.05881},
year={2024},
archivePrefix={arXiv},
eprint={2410.05881},
primaryClass={cs.CL}
} | carmo2024edit |
arxiv-667000 | 2410.05882 | Future frame prediction in chest cine MR imaging using the PCA respiratory motion model and dynamically trained recurrent neural networks | <|reference_start|>Future frame prediction in chest cine MR imaging using the PCA respiratory motion model and dynamically trained recurrent neural networks: Lung radiotherapy treatment systems are subject to a latency that leads to uncertainty in the estimated tumor location and high irradiation of healthy tissue. This work addresses future frame prediction in chest dynamic MRI sequences to compensate for that delay using RNNs trained with online learning algorithms. The latter enable networks to mitigate irregular movements, as they update synaptic weights with each new training example. Experiments were conducted using four publicly available 2D thoracic cine-MRI sequences. PCA decomposes the time-varying deformation vector field (DVF), computed with the Lucas-Kanade optical flow algorithm, into static deformation fields and low-dimensional time-dependent weights. We compare various algorithms to forecast the latter: linear regression, least mean squares (LMS), and RNNs trained with real-time recurrent learning (RTRL), unbiased online recurrent optimization, decoupled neural interfaces and sparse 1-step approximation (SnAp-1). That enables estimating the future DVFs and, in turn, the next frames by warping the initial image. Linear regression led to the lowest mean DVF error at a horizon h = 0.32s (the time interval in advance for which the prediction is made), equal to 1.30mm, followed by SnAp-1 and RTRL, whose error increased from 1.37mm to 1.44mm as h increased from 0.62s to 2.20s. Similarly, the structural similarity index measure (SSIM) of LMS decreased from 0.904 to 0.898 as h increased from 0.31s to 1.57s and was the highest among the algorithms compared for the latter horizons. SnAp-1 attained the highest SSIM for h $\geq$ 1.88s, with values of less than 0.898. 
The predicted images look similar to the original ones, and the highest errors occurred at challenging areas such as the diaphragm boundary at the end-of-inhale phase, where motion variability is more prominent, and regions where out-of-plane motion was more prevalent.<|reference_end|> | arxiv | @article{pohl2024future,
title={Future frame prediction in chest cine MR imaging using the PCA
respiratory motion model and dynamically trained recurrent neural networks},
author={Michel Pohl, Mitsuru Uesaka, Hiroyuki Takahashi, Kazuyuki Demachi,
Ritu Bhusal Chhatkuli},
journal={arXiv preprint arXiv:2410.05882},
year={2024},
archivePrefix={arXiv},
eprint={2410.05882},
primaryClass={eess.IV cs.CV cs.LG cs.NE}
} | pohl2024future |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.