Dataset columns (name, type, value-length range):
  corpus_id     string, lengths 7–12
  paper_id      string, lengths 9–16
  title         string, lengths 1–261
  abstract      string, lengths 70–4.02k
  source        string, 1 class
  bibtex        string, lengths 208–20.9k
  citation_key  string, lengths 6–100
arxiv-667301
2410.06456
From Generalist to Specialist: Adapting Vision Language Models via Task-Specific Visual Instruction Tuning
<|reference_start|>From Generalist to Specialist: Adapting Vision Language Models via Task-Specific Visual Instruction Tuning: Large vision language models (VLMs) combine large language models with vision encoders, demonstrating promise across various tasks. However, they often underperform in task-specific applications due to domain gaps between pre-training and fine-tuning. We introduce VITask, a novel framework that enhances task-specific adaptability of VLMs by integrating task-specific models (TSMs). VITask employs three key strategies: exemplar prompting (EP), response distribution alignment (RDA), and contrastive response tuning (CRT) to improve the task-specific performance of VLMs by adjusting their response distributions. EP allows TSM features to guide VLMs, while RDA enables VLMs to adapt without TSMs during inference by learning from exemplar-prompted models. CRT further optimizes the ranking of correct image-response pairs, thereby reducing the risk of generating undesired responses. Experiments on 12 medical diagnosis datasets across 9 imaging modalities show that VITask outperforms both vanilla instruction-tuned VLMs and TSMs, showcasing its ability to integrate complementary features from both models effectively. Additionally, VITask offers practical advantages such as flexible TSM integration and robustness to incomplete instructions, making it a versatile and efficient solution for task-specific VLM tuning. Our code is available at https://github.com/baiyang4/VITask.<|reference_end|>
arxiv
@article{bai2024from, title={From Generalist to Specialist: Adapting Vision Language Models via Task-Specific Visual Instruction Tuning}, author={Yang Bai, Yang Zhou, Jun Zhou, Rick Siow Mong Goh, Daniel Shu Wei Ting, Yong Liu}, journal={arXiv preprint arXiv:2410.06456}, year={2024}, archivePrefix={arXiv}, eprint={2410.06456}, primaryClass={cs.CV} }
bai2024from
arxiv-667302
2410.06458
LLM Self-Correction with DeCRIM: Decompose, Critique, and Refine for Enhanced Following of Instructions with Multiple Constraints
<|reference_start|>LLM Self-Correction with DeCRIM: Decompose, Critique, and Refine for Enhanced Following of Instructions with Multiple Constraints: Instruction following is a key capability for LLMs. However, recent studies have shown that LLMs often struggle with instructions containing multiple constraints (e.g. a request to create a social media post "in a funny tone" with "no hashtag"). Despite this, most evaluations focus solely on synthetic data. To address this, we introduce RealInstruct, the first benchmark designed to evaluate LLMs' ability to follow real-world multi-constrained instructions by leveraging queries real users asked AI assistants. We also investigate model-based evaluation as a cost-effective alternative to human annotation for this task. Our findings reveal that even the proprietary GPT-4 model fails to meet at least one constraint on over 21% of instructions, highlighting the limitations of state-of-the-art models. To address the performance gap between open-source and proprietary models, we propose the Decompose, Critique and Refine (DeCRIM) self-correction pipeline, which enhances LLMs' ability to follow constraints. DeCRIM works by decomposing the original instruction into a list of constraints and using a Critic model to decide when and where the LLM's response needs refinement. Our results show that DeCRIM improves Mistral's performance by 7.3% on RealInstruct and 8.0% on IFEval even with weak feedback. Moreover, we demonstrate that with strong feedback, open-source LLMs with DeCRIM can outperform GPT-4 on both benchmarks.<|reference_end|>
arxiv
@article{ferraz2024llm, title={LLM Self-Correction with DeCRIM: Decompose, Critique, and Refine for Enhanced Following of Instructions with Multiple Constraints}, author={Thomas Palmeira Ferraz, Kartik Mehta, Yu-Hsiang Lin, Haw-Shiuan Chang, Shereen Oraby, Sijia Liu, Vivek Subramanian, Tagyoung Chung, Mohit Bansal, Nanyun Peng}, journal={arXiv preprint arXiv:2410.06458}, year={2024}, archivePrefix={arXiv}, eprint={2410.06458}, primaryClass={cs.CL cs.AI cs.LG} }
ferraz2024llm
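The DeCRIM abstract above describes a concrete decompose-critique-refine loop. A minimal sketch of that loop, assuming placeholder `llm` and `critic` callables rather than the paper's actual interfaces:

```python
# Hypothetical sketch of the DeCRIM loop: decompose an instruction into
# constraints, let a Critic flag violations, then refine the response.
# `llm` and `critic` are placeholder callables, not APIs from the paper.

def decrim(llm, critic, instruction: str, max_rounds: int = 3) -> str:
    # Decompose: ask the model to list the instruction's atomic constraints.
    constraints = llm(f"List the individual constraints in: {instruction}").splitlines()
    response = llm(instruction)
    for _ in range(max_rounds):
        # Critique: collect the constraints the current response violates.
        violated = [c for c in constraints if critic(response, c) == "violated"]
        if not violated:
            break  # all constraints satisfied
        # Refine: regenerate conditioned on the specific failures.
        feedback = "; ".join(violated)
        response = llm(f"{instruction}\nRevise this draft to fix: {feedback}\nDraft: {response}")
    return response
```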
arxiv-667303
2410.06459
Mamba-based Segmentation Model for Speaker Diarization
<|reference_start|>Mamba-based Segmentation Model for Speaker Diarization: Mamba is a newly proposed architecture which behaves like a recurrent neural network (RNN) with attention-like capabilities. These properties are promising for speaker diarization, as attention-based models have unsuitable memory requirements for long-form audio, and traditional RNN capabilities are too limited. In this paper, we propose to assess the potential of Mamba for diarization by comparing the state-of-the-art neural segmentation of the pyannote pipeline with our proposed Mamba-based variant. Mamba's stronger processing capabilities allow usage of longer local windows, which significantly improve diarization quality by making the speaker embedding extraction more reliable. We find Mamba to be a superior alternative to both traditional RNN and the tested attention-based model. Our proposed Mamba-based system achieves state-of-the-art performance on three widely used diarization datasets.<|reference_end|>
arxiv
@article{plaquet2024mamba-based, title={Mamba-based Segmentation Model for Speaker Diarization}, author={Alexis Plaquet, Naohiro Tawara, Marc Delcroix, Shota Horiguchi, Atsushi Ando, Shoko Araki}, journal={arXiv preprint arXiv:2410.06459}, year={2024}, archivePrefix={arXiv}, eprint={2410.06459}, primaryClass={cs.SD eess.AS} }
plaquet2024mamba-based
arxiv-667304
2410.06460
A Benchmark on Directed Graph Representation Learning in Hardware Designs
<|reference_start|>A Benchmark on Directed Graph Representation Learning in Hardware Designs: To keep pace with the rapid advancements in design complexity within modern computing systems, directed graph representation learning (DGRL) has become crucial, particularly for encoding circuit netlists, computational graphs, and developing surrogate models for hardware performance prediction. However, DGRL remains relatively unexplored, especially in the hardware domain, mainly due to the lack of comprehensive and user-friendly benchmarks. This study presents a novel benchmark comprising five hardware design datasets and 13 prediction tasks spanning various levels of circuit abstraction. We evaluate 21 DGRL models, employing diverse graph neural networks and graph transformers (GTs) as backbones, enhanced by positional encodings (PEs) tailored for directed graphs. Our results highlight that bidirected (BI) message passing neural networks (MPNNs) and robust PEs significantly enhance model performance. Notably, the top-performing models include PE-enhanced GTs interleaved with BI-MPNN layers and BI-Graph Isomorphism Network, both surpassing baselines across the 13 tasks. Additionally, our investigation into out-of-distribution (OOD) performance emphasizes the urgent need to improve OOD generalization in DGRL models. This benchmark, implemented with a modular codebase, streamlines the evaluation of DGRL models for both hardware and ML practitioners.<|reference_end|>
arxiv
@article{wang2024a, title={A Benchmark on Directed Graph Representation Learning in Hardware Designs}, author={Haoyu Wang, Yinan Huang, Nan Wu, Pan Li}, journal={arXiv preprint arXiv:2410.06460}, year={2024}, archivePrefix={arXiv}, eprint={2410.06460}, primaryClass={cs.LG} }
wang2024a
arxiv-667305
2410.06462
Hallucinating AI Hijacking Attack: Large Language Models and Malicious Code Recommenders
<|reference_start|>Hallucinating AI Hijacking Attack: Large Language Models and Malicious Code Recommenders: The research builds and evaluates the adversarial potential to introduce copied code or hallucinated AI recommendations for malicious code in popular code repositories. While foundational large language models (LLMs) from OpenAI, Google, and Anthropic guard against both harmful behaviors and toxic strings, previous work on math solutions that embed harmful prompts demonstrates that the guardrails may differ between expert contexts. These loopholes would appear in mixture-of-experts models when the context of the question changes and may offer fewer malicious training examples to filter toxic comments or recommended offensive actions. The present work demonstrates that foundational models may correctly refuse to propose destructive actions when prompted overtly but may unfortunately drop their guard when presented with a sudden change of context, like solving a computer programming challenge. We show empirical examples with trojan-hosting repositories like GitHub, NPM, NuGet, and popular content delivery networks (CDN) like jsDelivr which amplify the attack surface. In the LLM's directives to be helpful, example recommendations propose application programming interface (API) endpoints which a determined domain-squatter could acquire to set up mobile attack infrastructure triggered by the naively copied code. We compare this attack to previous work on context-shifting and contrast the attack surface as a novel version of "living off the land" attacks in the malware literature. In the latter case, foundational language models can hijack otherwise innocent user prompts to recommend actions that violate their owners' safety policies when posed directly without the accompanying coding support request.<|reference_end|>
arxiv
@article{noever2024hallucinating, title={Hallucinating AI Hijacking Attack: Large Language Models and Malicious Code Recommenders}, author={David Noever and Forrest McKee}, journal={arXiv preprint arXiv:2410.06462}, year={2024}, archivePrefix={arXiv}, eprint={2410.06462}, primaryClass={cs.CR cs.AI} }
noever2024hallucinating
arxiv-667306
2410.06463
Average energy dissipation rates of additive implicit-explicit Runge-Kutta methods for gradient flow problems
<|reference_start|>Average energy dissipation rates of additive implicit-explicit Runge-Kutta methods for gradient flow problems: A unified theoretical framework is suggested to examine the energy dissipation properties at all stages of additive implicit-explicit Runge-Kutta (IERK) methods up to fourth-order accuracy for gradient flow problems. We construct some parameterized IERK methods by applying the so-called first same as last method, that is, the diagonally implicit Runge-Kutta method with the explicit first stage and stiffly-accurate assumption for the linear stiff term, and applying the explicit Runge-Kutta method for the nonlinear term. The main part of the novel framework is to construct the differential forms and the associated differentiation matrices of IERK methods by using the difference coefficients of the method and the so-called discrete orthogonal convolution kernels. As the main result, we prove that an IERK method can preserve the original energy dissipation law unconditionally if the associated differentiation matrix is positive semi-definite. The recent indicator, namely the average energy dissipation rate, is also adopted for these multi-stage methods to evaluate the overall energy dissipation rate of an IERK method such that one can choose proper parameters in some parameterized IERK methods. It is found that the selection of method parameters in the IERK methods is at least as important as the selection of different IERK methods. Extensive numerical experiments are also included to support our theory.<|reference_end|>
arxiv
@article{liao2024average, title={Average energy dissipation rates of additive implicit-explicit Runge-Kutta methods for gradient flow problems}, author={Hong-lin Liao, Xuping Wang, Cao Wen}, journal={arXiv preprint arXiv:2410.06463}, year={2024}, archivePrefix={arXiv}, eprint={2410.06463}, primaryClass={math.NA cs.NA} }
liao2024average
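For context on the "original energy dissipation law" the IERK framework above seeks to preserve, here is the standard continuous-time statement (a textbook identity, not reproduced from the paper) for a gradient flow driven by an energy functional $E$:

```latex
% For the gradient flow u_t = -\delta E / \delta u, the chain rule gives the
% original energy dissipation law:
\frac{\mathrm{d}}{\mathrm{d}t} E[u(t)]
  = \left\langle \frac{\delta E}{\delta u},\, \partial_t u \right\rangle
  = -\left\| \frac{\delta E}{\delta u} \right\|^2 \le 0.
```

The paper's result is the discrete analogue: an IERK scheme preserves such a decay unconditionally when its associated differentiation matrix is positive semi-definite.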
arxiv-667307
2410.06467
WAPITI: A Watermark for Finetuned Open-Source LLMs
<|reference_start|>WAPITI: A Watermark for Finetuned Open-Source LLMs: Watermarking of large language model (LLM) generation embeds an imperceptible statistical pattern within texts, making it algorithmically detectable. Watermarking is a promising method for addressing potential harm and biases from LLMs, as it enables traceability, accountability, and detection of manipulated content, helping to mitigate unintended consequences. However, for open-source models, watermarking faces two major challenges: (i) incompatibility with fine-tuned models, and (ii) vulnerability to fine-tuning attacks. In this work, we propose WAPITI, a new method that transfers watermarking from base models to fine-tuned models through parameter integration. To the best of our knowledge, we propose the first watermark for fine-tuned open-source LLMs that preserves their fine-tuned capabilities. Furthermore, our approach offers an effective defense against fine-tuning attacks. We test our method on various model architectures and watermarking strategies. Results demonstrate that our method can successfully inject watermarks and is highly compatible with fine-tuned models. Additionally, we offer an in-depth analysis of how parameter editing influences the watermark strength and overall capabilities of the resulting models.<|reference_end|>
arxiv
@article{chen2024wapiti:, title={WAPITI: A Watermark for Finetuned Open-Source LLMs}, author={Lingjie Chen, Ruizhong Qiu, Siyu Yuan, Zhining Liu, Tianxin Wei, Hyunsik Yoo, Zhichen Zeng, Deqing Yang, Hanghang Tong}, journal={arXiv preprint arXiv:2410.06467}, year={2024}, archivePrefix={arXiv}, eprint={2410.06467}, primaryClass={cs.CR} }
chen2024wapiti:
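The WAPITI abstract above hinges on "parameter integration." A minimal sketch of one plausible form of that idea — take the weight difference between a watermarked base model and the plain base model and add it to a fine-tuned model — with the caveat that the function name and the scalar `alpha` are illustrative and the paper's exact integration rule may differ:

```python
# Sketch of watermark transfer via parameter arithmetic. Each *_sd argument
# is a state dict mapping parameter names to tensors.

def transfer_watermark(base_sd, watermarked_sd, finetuned_sd, alpha=1.0):
    merged = {}
    for name, w_ft in finetuned_sd.items():
        delta = watermarked_sd[name] - base_sd[name]  # watermark direction in weight space
        merged[name] = w_ft + alpha * delta           # inject it into the fine-tuned weights
    return merged
```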
arxiv-667308
2410.06468
Does Spatial Cognition Emerge in Frontier Models?
<|reference_start|>Does Spatial Cognition Emerge in Frontier Models?: Not yet. We present SPACE, a benchmark that systematically evaluates spatial cognition in frontier models. Our benchmark builds on decades of research in cognitive science. It evaluates large-scale mapping abilities that are brought to bear when an organism traverses physical environments, smaller-scale reasoning about object shapes and layouts, and cognitive infrastructure such as spatial attention and memory. For many tasks, we instantiate parallel presentations via text and images, allowing us to benchmark both large language models and large multimodal models. Results suggest that contemporary frontier models fall short of the spatial intelligence of animals, performing near chance level on a number of classic tests of animal cognition.<|reference_end|>
arxiv
@article{ramakrishnan2024does, title={Does Spatial Cognition Emerge in Frontier Models?}, author={Santhosh Kumar Ramakrishnan, Erik Wijmans, Philipp Kraehenbuehl, Vladlen Koltun}, journal={arXiv preprint arXiv:2410.06468}, year={2024}, archivePrefix={arXiv}, eprint={2410.06468}, primaryClass={cs.AI cs.CV cs.LG} }
ramakrishnan2024does
arxiv-667309
2410.06469
Hybrid Fusion for Battery Degradation Diagnostics Using Minimal Real-World Data: Bridging Laboratory and Practical Applications
<|reference_start|>Hybrid Fusion for Battery Degradation Diagnostics Using Minimal Real-World Data: Bridging Laboratory and Practical Applications: Unpredictability of battery lifetime has been a key stumbling block to technology advancement of safety-critical systems such as electric vehicles and stationary energy storage systems. In this work, we present a novel hybrid fusion strategy that combines physics-based and data-driven approaches to accurately predict battery capacity. This strategy achieves an average estimation error of only 0.63% over the entire battery lifespan, utilizing merely 45 real-world data segments along with over 1.7 million simulated data segments derived from random partial charging cycles. By leveraging a thoroughly validated physics-based battery model, we extract typical aging patterns from laboratory aging data and extend them into a more comprehensive parameter space, encompassing diverse battery aging states in potential real-world applications while accounting for practical cell-to-cell variations. By bridging the gap between controlled laboratory experiments and real-world usage scenarios, this method highlights the significant potential of transferring underlying knowledge from high-fidelity physics-based models to data-driven models for predicting the behavior of complex dynamical systems.<|reference_end|>
arxiv
@article{liu2024hybrid, title={Hybrid Fusion for Battery Degradation Diagnostics Using Minimal Real-World Data: Bridging Laboratory and Practical Applications}, author={Yisheng Liu, Boru Zhou, Tengwei Pang, Guodong Fan, Xi Zhang}, journal={arXiv preprint arXiv:2410.06469}, year={2024}, archivePrefix={arXiv}, eprint={2410.06469}, primaryClass={cs.CE} }
liu2024hybrid
arxiv-667310
2410.06471
Cost-Effective Cyber-Physical System Prototype for Precision Agriculture with a Focus on Crop Growth
<|reference_start|>Cost-Effective Cyber-Physical System Prototype for Precision Agriculture with a Focus on Crop Growth: In precision agriculture, integrating advanced technologies is crucial for optimizing plant growth and health monitoring. Cyber-physical system (CPS) platforms tailored to specific agricultural environments have emerged, but the diversity of these environments poses challenges in developing adaptive CPS platforms. This paper explores rapid prototyping methods to address these challenges, focusing on non-destructive techniques for estimating plant growth. We present a CPS prototype that combines sensors, microcontrollers, digital image processing, and predictive modeling to measure leaf area and biomass accumulation in hydroponic environments. Our results show that the prototype effectively monitors and predicts plant growth, highlighting the potential of rapid CPS prototyping in promoting sustainability and improving crop yields at a moderate cost of hardware.<|reference_end|>
arxiv
@article{kumar2024cost-effective, title={Cost-Effective Cyber-Physical System Prototype for Precision Agriculture with a Focus on Crop Growth}, author={Pawan Kumar, Hokeun Kim}, journal={arXiv preprint arXiv:2410.06471}, year={2024}, archivePrefix={arXiv}, eprint={2410.06471}, primaryClass={eess.SY cs.SY} }
kumar2024cost-effective
arxiv-667311
2410.06472
Enabling Novel Mission Operations and Interactions with ROSA: The Robot Operating System Agent
<|reference_start|>Enabling Novel Mission Operations and Interactions with ROSA: The Robot Operating System Agent: The advancement of robotic systems has revolutionized numerous industries, yet their operation often demands specialized technical knowledge, limiting accessibility for non-expert users. This paper introduces ROSA (Robot Operating System Agent), an AI-powered agent that bridges the gap between the Robot Operating System (ROS) and natural language interfaces. By leveraging state-of-the-art language models and integrating open-source frameworks, ROSA enables operators to interact with robots using natural language, translating commands into actions and interfacing with ROS through well-defined tools. ROSA's design is modular and extensible, offering seamless integration with both ROS1 and ROS2, along with safety mechanisms like parameter validation and constraint enforcement to ensure secure, reliable operations. While ROSA was originally designed for ROS, it can be extended to work with other robotics middleware to maximize compatibility across missions. ROSA enhances human-robot interaction by democratizing access to complex robotic systems, empowering users of all expertise levels with multi-modal capabilities such as speech integration and visual perception. Ethical considerations are thoroughly addressed, guided by foundational principles like Asimov's Three Laws of Robotics, ensuring that AI integration promotes safety, transparency, privacy, and accountability. By making robotic technology more user-friendly and accessible, ROSA not only improves operational efficiency but also sets a new standard for responsible AI use in robotics and potentially future mission operations. This paper introduces ROSA's architecture and showcases initial mock-up operations in JPL's Mars Yard, a laboratory, and a simulation using three different robots. The core ROSA library is available as open-source.<|reference_end|>
arxiv
@article{royce2024enabling, title={Enabling Novel Mission Operations and Interactions with ROSA: The Robot Operating System Agent}, author={Rob Royce, Marcel Kaufmann, Jonathan Becktor, Sangwoo Moon, Kalind Carpenter, Kai Pak, Amanda Towler, Rohan Thakker, Shehryar Khattak}, journal={arXiv preprint arXiv:2410.06472}, year={2024}, archivePrefix={arXiv}, eprint={2410.06472}, primaryClass={cs.RO cs.AI cs.HC} }
royce2024enabling
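In the spirit of ROSA's tool-based design with parameter validation, a toy dispatch from a structured LLM tool call to validated robot actions; the tool names, the validation rule, and the call format are hypothetical, not ROSA's actual API:

```python
# Illustrative only: natural-language commands are assumed to have been
# translated by an LLM into structured tool calls, which are validated
# before touching the robot.

def list_topics() -> str:
    return "/camera/image_raw\n/cmd_vel"   # stand-in for a real `rostopic list`

def set_speed(value: float) -> str:
    if not 0.0 <= value <= 1.0:            # constraint enforcement before acting
        return "rejected: speed must be in [0, 1]"
    return f"speed set to {value}"

TOOLS = {"list_topics": list_topics, "set_speed": set_speed}

def dispatch(tool_call: dict) -> str:
    # An LLM would emit e.g. {"name": "set_speed", "args": {"value": 0.5}}.
    return TOOLS[tool_call["name"]](**tool_call.get("args", {}))

print(dispatch({"name": "set_speed", "args": {"value": 0.5}}))
```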
arxiv-667312
2410.06473
Grounding Robot Policies with Visuomotor Language Guidance
<|reference_start|>Grounding Robot Policies with Visuomotor Language Guidance: Recent advances in the fields of natural language processing and computer vision have shown great potential in understanding the underlying dynamics of the world from large-scale internet data. However, translating this knowledge into robotic systems remains an open challenge, given the scarcity of human-robot interactions and the lack of large-scale datasets of real-world robotic data. Previous robot learning approaches such as behavior cloning and reinforcement learning have shown great capabilities in learning robotic skills from human demonstrations or from scratch in specific environments. However, these approaches often require task-specific demonstrations or designing complex simulation environments, which limits the development of generalizable and robust policies for new settings. Aiming to address these limitations, we propose an agent-based framework for grounding robot policies to the current context, considering the constraints of a current robot and its environment using visuomotor-grounded language guidance. The proposed framework is composed of a set of conversational agents designed for specific roles -- namely, high-level advisor, visual grounding, monitoring, and robotic agents. Given a base policy, the agents collectively generate guidance at run time to shift the action distribution of the base policy towards more desirable future states. We demonstrate that our approach can effectively guide manipulation policies to achieve significantly higher success rates both in simulation and in real-world experiments without the need for additional human demonstrations or extensive exploration. Project videos at https://sites.google.com/view/motorcortex/home.<|reference_end|>
arxiv
@article{bucker2024grounding, title={Grounding Robot Policies with Visuomotor Language Guidance}, author={Arthur Bucker, Pablo Ortega-Kral, Jonathan Francis, Jean Oh}, journal={arXiv preprint arXiv:2410.06473}, year={2024}, archivePrefix={arXiv}, eprint={2410.06473}, primaryClass={cs.RO cs.AI} }
bucker2024grounding
arxiv-667313
2410.06474
Flipping-based Policy for Chance-Constrained Markov Decision Processes
<|reference_start|>Flipping-based Policy for Chance-Constrained Markov Decision Processes: Safe reinforcement learning (RL) is a promising approach for many real-world decision-making problems where ensuring safety is a critical necessity. In safe RL research, while expected cumulative safety constraints (ECSCs) are typically the first choices, chance constraints are often more pragmatic for incorporating safety under uncertainties. This paper proposes a \textit{flipping-based policy} for Chance-Constrained Markov Decision Processes (CCMDPs). The flipping-based policy selects the next action by tossing a potentially distorted coin between two action candidates. The probability of the flip and the two action candidates vary depending on the state. We establish a Bellman equation for CCMDPs and further prove the existence of a flipping-based policy within the optimal solution sets. Since solving the problem with joint chance constraints is challenging in practice, we then prove that joint chance constraints can be approximated into Expected Cumulative Safety Constraints (ECSCs) and that there exists a flipping-based policy in the optimal solution sets for constrained MDPs with ECSCs. As a specific instance of practical implementations, we present a framework for adapting constrained policy optimization to train a flipping-based policy. This framework can be applied to other safe RL algorithms. We demonstrate that the flipping-based policy can improve the performance of the existing safe RL algorithms under the same limits of safety constraints on Safety Gym benchmarks.<|reference_end|>
arxiv
@article{shen2024flipping-based, title={Flipping-based Policy for Chance-Constrained Markov Decision Processes}, author={Xun Shen, Shuo Jiang, Akifumi Wachi, Kaumune Hashimoto, Sebastien Gros}, journal={arXiv preprint arXiv:2410.06474}, year={2024}, archivePrefix={arXiv}, eprint={2410.06474}, primaryClass={cs.LG math.OC} }
shen2024flipping-based
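The flipping-based policy described above has a very direct form: at each state, toss a possibly biased coin between two state-dependent action candidates. A minimal sketch, with an illustrative parameterization (the candidates and flip probability would come from the trained policy):

```python
# Sketch of a flipping-based policy: the flip probability and both action
# candidates vary with the state.
import random

def flipping_policy(state, action_a, action_b, flip_prob):
    """Return action_a(state) with probability flip_prob(state), else action_b(state)."""
    if random.random() < flip_prob(state):
        return action_a(state)
    return action_b(state)

# Example: mix a safe action and a reward-seeking action 30/70.
act = flipping_policy(
    state=0.0,
    action_a=lambda s: "brake",
    action_b=lambda s: "accelerate",
    flip_prob=lambda s: 0.3,
)
```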
arxiv-667314
2410.06475
3D Representation Methods: A Survey
<|reference_start|>3D Representation Methods: A Survey: The field of 3D representation has experienced significant advancements, driven by the increasing demand for high-fidelity 3D models in various applications such as computer graphics, virtual reality, and autonomous systems. This review examines the development and current state of 3D representation methods, highlighting their research trajectories, innovations, strength and weakness. Key techniques such as Voxel Grid, Point Cloud, Mesh, Signed Distance Function (SDF), Neural Radiance Field (NeRF), 3D Gaussian Splatting, Tri-Plane, and Deep Marching Tetrahedra (DMTet) are reviewed. The review also introduces essential datasets that have been pivotal in advancing the field, highlighting their characteristics and impact on research progress. Finally, we explore potential research directions that hold promise for further expanding the capabilities and applications of 3D representation methods.<|reference_end|>
arxiv
@article{wang20243d, title={3D Representation Methods: A Survey}, author={Zhengren Wang}, journal={arXiv preprint arXiv:2410.06475}, year={2024}, archivePrefix={arXiv}, eprint={2410.06475}, primaryClass={cs.CV} }
wang20243d
arxiv-667315
2410.06476
Quantitative Theory of Meaning. Application to Financial Markets. EUR/USD case study
<|reference_start|>Quantitative Theory of Meaning. Application to Financial Markets. EUR/USD case study: The paper focuses on the link between information, investors' expectations, and market price movement. The EUR/USD market is examined from a communication-theoretical perspective on the dynamics of information and meaning. We build upon the quantitative theory of meaning as a complement to the quantitative theory of information. Different groups of investors entertain different criteria to process information, so that the same information can be supplied with different meanings. Meanings shape investors' expectations, which are revealed in market asset price movement. These dynamics can be captured by a non-linear evolutionary equation. We use a computationally efficient technique of logistic Continuous Wavelet Transformation (CWT) to analyze the EUR/USD market. The results reveal the latent EUR/USD trend structure, which coincides with the model-predicted time series, indicating that the proposed model can adequately describe some patterns of investors' behavior. The proposed methodology can be used to better understand and forecast future movements in market asset prices.<|reference_end|>
arxiv
@article{ivanova2024quantitative, title={Quantitative Theory of Meaning. Application to Financial Markets. EUR/USD case study}, author={Inga Ivanova, Grzegorz Rzadkowski and Loet Leydesdorff}, journal={arXiv preprint arXiv:2410.06476}, year={2024}, archivePrefix={arXiv}, eprint={2410.06476}, primaryClass={cs.CY} }
ivanova2024quantitative
arxiv-667316
2410.06477
Complete pivoting growth of butterfly matrices and butterfly Hadamard matrices
<|reference_start|>Complete pivoting growth of butterfly matrices and butterfly Hadamard matrices: The growth problem in Gaussian elimination (GE) remains a foundational question in numerical analysis and numerical linear algebra. Wilkinson resolved the growth problem in GE with partial pivoting (GEPP) in his initial analysis from the 1960s, while he was only able to establish an upper bound for the GE with complete pivoting (GECP) growth problem. The GECP growth problem has seen a spike in recent interest, culminating in improved lower and upper bounds established by Bisain, Edelman, and Urschel in 2023, but still remains far from being fully resolved. Due to the complex dynamics governing the location of GECP pivots, analysis of GECP growth for particular input matrices often estimates the actual growth rather than computes the growth exactly. We present a class of dense random butterfly matrices on which we can present the exact GECP growth. We extend previous results that established exact growth computations for butterfly matrices when using GEPP and GE with rook pivoting (GERP) to now also include GECP for particular input matrices. Moreover, we present a new method to construct random Hadamard matrices using butterfly matrices.<|reference_end|>
arxiv
@article{peca-medlin2024complete, title={Complete pivoting growth of butterfly matrices and butterfly Hadamard matrices}, author={John Peca-Medlin}, journal={arXiv preprint arXiv:2410.06477}, year={2024}, archivePrefix={arXiv}, eprint={2410.06477}, primaryClass={math.NA cs.NA math.PR} }
peca-medlin2024complete
arxiv-667317
2410.06478
MaskBlur: Spatial and Angular Data Augmentation for Light Field Image Super-Resolution
<|reference_start|>MaskBlur: Spatial and Angular Data Augmentation for Light Field Image Super-Resolution: Data augmentation (DA) is an effective approach for enhancing model performance with limited data, such as light field (LF) image super-resolution (SR). LF images inherently possess rich spatial and angular information. Nonetheless, there is a scarcity of DA methodologies explicitly tailored for LF images, and existing works tend to concentrate solely on either the spatial or angular domain. This paper proposes a novel spatial and angular DA strategy named MaskBlur for LF image SR by concurrently addressing spatial and angular aspects. MaskBlur consists of two components: spatial blur and angular dropout. Spatial blur is governed by a spatial mask, which controls where pixels are blurred, i.e., pasting pixels between the low-resolution and high-resolution domains. The angular mask is responsible for angular dropout, i.e., selecting which views to perform the spatial blur operation on. By doing so, MaskBlur enables the model to treat pixels differently in the spatial and angular domains when super-resolving LF images rather than blindly treating all pixels equally. Extensive experiments demonstrate the efficacy of MaskBlur in significantly enhancing the performance of existing SR methods. We further extend MaskBlur to other LF image tasks such as denoising, deblurring, low-light enhancement, and real-world SR. Code is publicly available at \url{https://github.com/chaowentao/MaskBlur}.<|reference_end|>
arxiv
@article{chao2024maskblur:, title={MaskBlur: Spatial and Angular Data Augmentation for Light Field Image Super-Resolution}, author={Wentao Chao, Fuqing Duan, Yulan Guo, Guanghui Wang}, journal={arXiv preprint arXiv:2410.06478}, year={2024}, archivePrefix={arXiv}, eprint={2410.06478}, primaryClass={eess.IV cs.CV} }
chao2024maskblur:
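A rough numpy sketch of the two MaskBlur components as the abstract describes them — a spatial mask deciding where low-resolution pixels are pasted into the high-resolution view, and an angular mask selecting which views are affected. Tensor shapes, ratios, and random mask generation are assumptions; the released code should be treated as authoritative:

```python
import numpy as np

def maskblur(hr, lr_up, spatial_ratio=0.3, angular_ratio=0.5):
    """hr, lr_up: light fields of shape (views, H, W); lr_up is the upsampled LR field."""
    views, h, w = hr.shape
    out = hr.copy()
    # Angular dropout: choose which views undergo the spatial blur operation.
    chosen = np.random.rand(views) < angular_ratio
    # Spatial blur: a per-pixel mask deciding where LR pixels are pasted into HR.
    mask = np.random.rand(h, w) < spatial_ratio
    for v in np.where(chosen)[0]:
        out[v][mask] = lr_up[v][mask]
    return out
```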
arxiv-667318
2410.06479
LLM Compression with Neural Architecture Search
<|reference_start|>LLM Compression with Neural Architecture Search: Large language models (LLMs) exhibit remarkable reasoning abilities, allowing them to generalize across a wide range of downstream tasks, such as commonsense reasoning or instruction following. However, as LLMs scale, inference costs become increasingly prohibitive, accumulating significantly over their life cycle. This poses the question: Can we compress pre-trained LLMs to meet diverse size and latency requirements? We leverage Neural Architecture Search (NAS) to compress LLMs by pruning structural components, such as attention heads, neurons, and layers, aiming to achieve a Pareto-optimal balance between performance and efficiency. While NAS already achieved promising results on small language models in previous work, in this paper we propose various extensions that allow us to scale to LLMs. Compared to structural pruning baselines, we show that NAS improves performance up to 3.4% on MMLU with an on-device latency speedup.<|reference_end|>
arxiv
@article{sukthanker2024large, title={Large Language Model Compression with Neural Architecture Search}, author={Rhea Sanjay Sukthanker, Benedikt Staffler, Frank Hutter, Aaron Klein}, journal={arXiv preprint arXiv:2410.06479}, year={2024}, archivePrefix={arXiv}, eprint={2410.06479}, primaryClass={cs.CL} }
sukthanker2024large
arxiv-667319
2410.06480
TCGU: Data-centric Graph Unlearning based on Transferable Condensation
<|reference_start|>TCGU: Data-centric Graph Unlearning based on Transferable Condensation: With growing demands for data privacy and model robustness, graph unlearning (GU), which erases the influence of specific data on trained GNN models, has gained significant attention. However, existing exact unlearning methods suffer from either low efficiency or poor model performance. While being more utility-preserving and efficient, current approximate unlearning methods are not applicable in the zero-glance privacy setting, where the deleted samples cannot be accessed during unlearning due to immediate deletion requested by regulations. Besides, these approximate methods, which try to directly perturb model parameters, still involve high privacy concerns in practice. To fill the gap, we propose Transferable Condensation Graph Unlearning (TCGU), a data-centric solution to zero-glance graph unlearning. Specifically, we first design a two-level alignment strategy to pre-condense the original graph into a small yet utility-preserving dataset. Upon receiving an unlearning request, we fine-tune the pre-condensed data with a low-rank plugin, to directly align its distribution with the remaining graph, thus efficiently revoking the information of deleted data without accessing them. A novel similarity distribution matching approach and a discrimination regularizer are proposed to effectively transfer condensed data and preserve its utility in GNN training, respectively. Finally, we retrain the GNN on the transferred condensed data. Extensive experiments on 6 benchmark datasets demonstrate that TCGU can achieve superior performance in terms of model utility, unlearning efficiency, and unlearning efficacy compared to existing GU methods.<|reference_end|>
arxiv
@article{li2024tcgu:, title={TCGU: Data-centric Graph Unlearning based on Transferable Condensation}, author={Fan Li, Xiaoyang Wang, Dawei Cheng, Wenjie Zhang, Ying Zhang, and Xuemin Lin}, journal={arXiv preprint arXiv:2410.06480}, year={2024}, archivePrefix={arXiv}, eprint={2410.06480}, primaryClass={cs.LG} }
li2024tcgu:
arxiv-667320
2410.06481
Leaf Stripping on Uniform Attachment Trees
<|reference_start|>Leaf Stripping on Uniform Attachment Trees: In this note we analyze the performance of a simple root-finding algorithm in uniform attachment trees. The leaf-stripping algorithm recursively removes all leaves of the tree for a carefully chosen number of rounds. We show that, with probability $1 - \epsilon$, the set of remaining vertices contains the root and has a size only depending on $\epsilon$ but not on the size of the tree.<|reference_end|>
arxiv
@article{addario-berry2024leaf, title={Leaf Stripping on Uniform Attachment Trees}, author={Louigi Addario-Berry, Anna Brandenberger, Simon Briend, Nicolas Broutin, Gábor Lugosi}, journal={arXiv preprint arXiv:2410.06481}, year={2024}, archivePrefix={arXiv}, eprint={2410.06481}, primaryClass={math.PR cs.DS cs.LG stat.ML} }
addario-berry2024leaf
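The leaf-stripping algorithm analyzed above is simple enough to state in full: repeatedly delete all current leaves for a fixed number of rounds, and return the survivors as the candidate root set. A straightforward implementation; the choice of the number of rounds (a function of $\epsilon$ in the paper) is left abstract here:

```python
def leaf_strip(adj: dict, rounds: int) -> set:
    """adj maps each vertex to the set of its neighbours (undirected tree)."""
    alive = set(adj)
    for _ in range(rounds):
        # Leaves of the current (stripped) tree: at most one alive neighbour.
        leaves = {v for v in alive
                  if sum(u in alive for u in adj[v]) <= 1}
        if leaves == alive:   # stop rather than empty the tree entirely
            break
        alive -= leaves
    return alive              # with prob. 1 - eps, contains the root
```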
arxiv-667321
2410.06482
OledFL: Unleashing the Potential of Decentralized Federated Learning via Opposite Lookahead Enhancement
<|reference_start|>OledFL: Unleashing the Potential of Decentralized Federated Learning via Opposite Lookahead Enhancement: Decentralized Federated Learning (DFL) surpasses Centralized Federated Learning (CFL) in terms of faster training, privacy preservation, and light communication, making it a promising alternative in the field of federated learning. However, DFL still exhibits significant disparities with CFL in terms of generalization ability, such as limited theoretical understanding and degraded empirical performance due to severe inconsistency. In this paper, we enhance the consistency of DFL by developing an opposite lookahead enhancement technique (Ole), yielding OledFL to optimize the initialization of each client in each communication round, thus significantly improving both the generalization and convergence speed. Moreover, we rigorously establish its convergence rate in non-convex setting and characterize its generalization bound through uniform stability, which provides concrete reasons why OledFL can achieve both the fast convergence speed and high generalization ability. Extensive experiments conducted on the CIFAR10 and CIFAR100 datasets with Dirichlet and Pathological distributions illustrate that our OledFL can achieve up to 5\% performance improvement and 8$\times$ speedup, compared to the most popular DFedAvg optimizer in DFL.<|reference_end|>
arxiv
@article{li2024oledfl:, title={OledFL: Unleashing the Potential of Decentralized Federated Learning via Opposite Lookahead Enhancement}, author={Qinglun Li, Miao Zhang, Mengzhu Wang, Quanjun Yin and Li Shen}, journal={arXiv preprint arXiv:2410.06482}, year={2024}, archivePrefix={arXiv}, eprint={2410.06482}, primaryClass={cs.LG cs.AI} }
li2024oledfl:
arxiv-667322
2410.06483
Deep Learning Ensemble for Predicting Diabetic Macular Edema Onset Using Ultra-Wide Field Color Fundus Image
<|reference_start|>Deep Learning Ensemble for Predicting Diabetic Macular Edema Onset Using Ultra-Wide Field Color Fundus Image: Diabetic macular edema (DME) is a severe complication of diabetes, characterized by thickening of the central portion of the retina due to accumulation of fluid. DME is a significant and common cause of visual impairment in diabetic patients. Center-involved DME (ci-DME) is the highest risk form of disease as fluid extends close to the fovea which is responsible for sharp central vision. Earlier diagnosis or prediction of ci-DME may improve treatment outcomes. Here, we propose an ensemble method to predict ci-DME onset within a year using ultra-wide-field color fundus photography (UWF-CFP) images provided by the DIAMOND Challenge. We adopted a variety of baseline state-of-the-art classification networks including ResNet, DenseNet, EfficientNet, and VGG with the aim of enhancing model robustness. The best performing models were DenseNet-121, ResNet-152, and EfficientNet-B7, and these were assembled into a definitive predictive model. The final ensemble model demonstrates a strong performance with an Area Under Curve (AUC) of 0.7017, an F1 score of 0.6512, and an Expected Calibration Error (ECE) of 0.2057 when deployed on a synthetic dataset. The performance of this ensemble model is comparable to previous studies despite training and testing in a more realistic setting, indicating the potential of UWF-CFP combined with a deep learning classification system to facilitate earlier diagnosis, better treatment decisions, and improved prognostication in ci-DME.<|reference_end|>
arxiv
@article{qin2024deep, title={Deep Learning Ensemble for Predicting Diabetic Macular Edema Onset Using Ultra-Wide Field Color Fundus Image}, author={Pengyao Qin, Arun J. Thirunavukarasu, and Le Zhang}, journal={arXiv preprint arXiv:2410.06483}, year={2024}, archivePrefix={arXiv}, eprint={2410.06483}, primaryClass={eess.IV cs.AI cs.CV} }
qin2024deep
arxiv-667323
2410.06485
A Decomposition Approach to the Weighted $k$-server Problem
<|reference_start|>A Decomposition Approach to the Weighted $k$-server Problem: A natural variant of the classical online $k$-server problem is the Weighted $k$-server problem, where the cost of moving a server is its weight times the distance through which it moves. Despite its apparent simplicity, the weighted $k$-server problem is extremely poorly understood. Specifically, even on uniform metric spaces, finding the optimum competitive ratio of randomized algorithms remains an open problem -- the best upper bound known is $2^{2^{k+O(1)}}$ due to a deterministic algorithm (Bansal et al., 2018), and the best lower bound known is $\Omega(2^k)$ (Ayyadevara and Chiplunkar, 2021). With the aim of closing this exponential gap between the upper and lower bounds, we propose a decomposition approach for designing a randomized algorithm for weighted $k$-server on uniform metrics. Our first contribution includes two relaxed versions of the problem and a technique to obtain an algorithm for weighted $k$-server from algorithms for the two relaxed versions. Specifically, we prove that if there exists an $\alpha_1$-competitive algorithm for one version (which we call Weighted $k$-Server - Service Pattern Construction (W$k$S-SPC)) and there exists an $\alpha_2$-competitive algorithm for the other version (which we call Weighted $k$-server - Revealed Service Pattern (W$k$S-RSP)), then there exists an $(\alpha_1\alpha_2)$-competitive algorithm for weighted $k$-server on uniform metric spaces. Our second contribution is a $2^{O(k^2)}$-competitive randomized algorithm for W$k$S-RSP. As a consequence, the task of designing a $2^{poly(k)}$-competitive randomized algorithm for weighted $k$-server on uniform metrics reduces to designing a $2^{poly(k)}$-competitive randomized algorithm for W$k$S-SPC. Finally, we also prove that the $\Omega(2^k)$ lower bound for weighted $k$-server, in fact, holds for W$k$S-RSP.<|reference_end|>
arxiv
@article{ayyadevara2024a, title={A Decomposition Approach to the Weighted $k$-server Problem}, author={Nikhil Ayyadevara, Ashish Chiplunkar, Amatya Sharma}, journal={arXiv preprint arXiv:2410.06485}, year={2024}, archivePrefix={arXiv}, eprint={2410.06485}, primaryClass={cs.DS cs.CC cs.DM} }
ayyadevara2024a
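To make the payoff of the decomposition explicit (a worked instantiation of the abstract's two results, not a quote from the paper): since W$k$S-RSP admits an $\alpha_2 = 2^{O(k^2)}$-competitive algorithm, any $2^{\mathrm{poly}(k)}$-competitive algorithm for W$k$S-SPC would compose to

```latex
\alpha_1 \cdot \alpha_2 \;=\; 2^{\mathrm{poly}(k)} \cdot 2^{O(k^2)} \;=\; 2^{\mathrm{poly}(k)},
```

which would bring the upper bound from the known $2^{2^{k+O(1)}}$ down toward the $\Omega(2^k)$ lower bound.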
arxiv-667324
2410.06488
HFH-Font: Few-shot Chinese Font Synthesis with Higher Quality, Faster Speed, and Higher Resolution
<|reference_start|>HFH-Font: Few-shot Chinese Font Synthesis with Higher Quality, Faster Speed, and Higher Resolution: The challenge of automatically synthesizing high-quality vector fonts, particularly for writing systems (e.g., Chinese) consisting of huge amounts of complex glyphs, remains unsolved. Existing font synthesis techniques fall into two categories: 1) methods that directly generate vector glyphs, and 2) methods that initially synthesize glyph images and then vectorize them. However, the first category often fails to construct complete and correct shapes for complex glyphs, while the latter struggles to efficiently synthesize high-resolution (i.e., 1024 $\times$ 1024 or higher) glyph images while preserving local details. In this paper, we introduce HFH-Font, a few-shot font synthesis method capable of efficiently generating high-resolution glyph images that can be converted into high-quality vector glyphs. More specifically, our method employs a diffusion model-based generative framework with component-aware conditioning to learn different levels of style information adaptable to varying input reference sizes. We also design a distillation module based on Score Distillation Sampling for 1-step fast inference, and a style-guided super-resolution module to refine and upscale low-resolution synthesis results. Extensive experiments, including a user study with professional font designers, have been conducted to demonstrate that our method significantly outperforms existing font synthesis approaches. Experimental results show that our method produces high-fidelity, high-resolution raster images which can be vectorized into high-quality vector fonts. Using our method, for the first time, large-scale Chinese vector fonts of a quality comparable to those manually created by professional font designers can be automatically generated.<|reference_end|>
arxiv
@article{li2024hfh-font:, title={HFH-Font: Few-shot Chinese Font Synthesis with Higher Quality, Faster Speed, and Higher Resolution}, author={Hua Li, Zhouhui Lian}, journal={arXiv preprint arXiv:2410.06488}, year={2024}, archivePrefix={arXiv}, eprint={2410.06488}, primaryClass={cs.CV} }
li2024hfh-font:
arxiv-667325
2410.06490
FedL2G: Learning to Guide Local Training in Heterogeneous Federated Learning
<|reference_start|>FedL2G: Learning to Guide Local Training in Heterogeneous Federated Learning: Data and model heterogeneity are two core issues in Heterogeneous Federated Learning (HtFL). In scenarios with heterogeneous model architectures, aggregating model parameters becomes infeasible, leading to the use of prototypes (i.e., class representative feature vectors) for aggregation and guidance. However, they still experience a mismatch between the extra guiding objective and the client's original local objective when aligned with global prototypes. Thus, we propose a Federated Learning-to-Guide (FedL2G) method that adaptively learns to guide local training in a federated manner and ensures the extra guidance is beneficial to clients' original tasks. With theoretical guarantees, FedL2G efficiently implements the learning-to-guide process using only first-order derivatives w.r.t. model parameters and achieves a non-convex convergence rate of O(1/T). We conduct extensive experiments on two data heterogeneity and six model heterogeneity settings using 14 heterogeneous model architectures (e.g., CNNs and ViTs) to demonstrate FedL2G's superior performance compared to six counterparts.<|reference_end|>
arxiv
@article{zhang2024fedl2g:, title={FedL2G: Learning to Guide Local Training in Heterogeneous Federated Learning}, author={Jianqing Zhang, Yang Liu, Yang Hua, Jian Cao, Qiang Yang}, journal={arXiv preprint arXiv:2410.06490}, year={2024}, archivePrefix={arXiv}, eprint={2410.06490}, primaryClass={cs.LG cs.AI} }
zhang2024fedl2g:
arxiv-667326
2410.06491
Honesty to Subterfuge: In-Context Reinforcement Learning Can Make Honest Models Reward Hack
<|reference_start|>Honesty to Subterfuge: In-Context Reinforcement Learning Can Make Honest Models Reward Hack: Previous work has shown that training "helpful-only" LLMs with reinforcement learning on a curriculum of gameable environments can lead models to generalize to egregious specification gaming, such as editing their own reward function or modifying task checklists to appear more successful. We show that gpt-4o, gpt-4o-mini, o1-preview, and o1-mini - frontier models trained to be helpful, harmless, and honest - can engage in specification gaming without training on a curriculum of tasks, purely from in-context iterative reflection (which we call in-context reinforcement learning, "ICRL"). We also show that using ICRL to generate highly-rewarded outputs for expert iteration (compared to the standard expert iteration reinforcement learning algorithm) may increase gpt-4o-mini's propensity to learn specification-gaming policies, generalizing (in very rare cases) to the most egregious strategy where gpt-4o-mini edits its own reward function. Our results point toward the strong ability of in-context reflection to discover rare specification-gaming strategies that models might not exhibit zero-shot or with normal training, highlighting the need for caution when relying on alignment of LLMs in zero-shot settings.<|reference_end|>
arxiv
@article{mckee-reid2024honesty, title={Honesty to Subterfuge: In-Context Reinforcement Learning Can Make Honest Models Reward Hack}, author={Leo McKee-Reid, Christoph Sträter, Maria Angelica Martinez, Joe Needham, Mikita Balesni}, journal={arXiv preprint arXiv:2410.06491}, year={2024}, archivePrefix={arXiv}, eprint={2410.06491}, primaryClass={cs.AI cs.LG} }
mckee-reid2024honesty
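The "in-context reinforcement learning" loop named above is purely prompt-level: the model reflects on its own rewarded attempts in context, with no weight updates. A schematic sketch, assuming placeholder `model` and `reward` callables (not the paper's harness):

```python
def icrl_episode(model, reward, task: str, attempts: int = 5) -> str:
    history = []
    best, best_r = None, float("-inf")
    for _ in range(attempts):
        # Condition each new attempt on all prior attempts and their rewards.
        context = task + "".join(
            f"\nAttempt: {a}\nReward: {r}\nReflect and try to score higher."
            for a, r in history
        )
        attempt = model(context)
        r = reward(attempt)
        history.append((attempt, r))
        if r > best_r:
            best, best_r = attempt, r
    return best   # e.g. a highly-rewarded output kept for expert iteration
```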
arxiv-667327
2410.06492
Overcoming Autoware-Ubuntu Incompatibility in Autonomous Driving Systems-Equipped Vehicles: Lessons Learned
<|reference_start|>Overcoming Autoware-Ubuntu Incompatibility in Autonomous Driving Systems-Equipped Vehicles: Lessons Learned: Autonomous vehicles have been rapidly developed as demand that provides safety and efficiency in transportation systems. As autonomous vehicles are designed based on open-source operating and computing systems, there are numerous resources aimed at building an operating platform composed of Ubuntu, Autoware, and Robot Operating System (ROS). However, no explicit guidelines exist to help scholars perform trouble-shooting due to incompatibility between the Autoware platform and Ubuntu operating systems installed in autonomous driving systems-equipped vehicles (i.e., Chrysler Pacifica). The paper presents an overview of integrating the Autoware platform into the autonomous vehicle's interface based on lessons learned from trouble-shooting processes for resolving incompatible issues. The trouble-shooting processes are presented based on resolving the incompatibility and integration issues of Ubuntu 20.04, Autoware.AI, and ROS Noetic software installed in an autonomous driving systems-equipped vehicle. Specifically, the paper focused on common incompatibility issues and code-solving protocols involving Python compatibility, Compute Unified Device Architecture (CUDA) installation, Autoware installation, and simulation in Autoware.AI. The objective of the paper is to provide an explicit and detail-oriented presentation to showcase how to address incompatibility issues among an autonomous vehicle's operating interference. The lessons and experience presented in the paper will be useful for researchers who encountered similar issues and could follow up by performing trouble-shooting activities and implementing ADS-related projects in the Ubuntu, Autoware, and ROS operating systems.<|reference_end|>
arxiv
@article{zhang2024overcoming, title={Overcoming Autoware-Ubuntu Incompatibility in Autonomous Driving Systems-Equipped Vehicles: Lessons Learned}, author={Dada Zhang and Md Ruman Islam and Pei-Chi Huang and Chun-Hsing Ho}, journal={arXiv preprint arXiv:2410.06492}, year={2024}, archivePrefix={arXiv}, eprint={2410.06492}, primaryClass={cs.RO cs.OS cs.SE} }
zhang2024overcoming
arxiv-667328
2410.06493
BiC-MPPI: Goal-Pursuing, Sampling-Based Bidirectional Rollout Clustering Path Integral for Trajectory Optimization
<|reference_start|>BiC-MPPI: Goal-Pursuing, Sampling-Based Bidirectional Rollout Clustering Path Integral for Trajectory Optimization: This paper introduces the Bidirectional Clustered MPPI (BiC-MPPI) algorithm, a novel trajectory optimization method aimed at enhancing goal-directed guidance within the Model Predictive Path Integral (MPPI) framework. BiC-MPPI incorporates bidirectional dynamics approximations and a new guide cost mechanism, improving both trajectory planning and goal-reaching performance. By leveraging forward and backward rollouts, the bidirectional approach ensures effective trajectory connections between initial and terminal states, while the guide cost helps discover dynamically feasible paths. Experimental results demonstrate that BiC-MPPI outperforms existing MPPI variants in both 2D and 3D environments, achieving higher success rates and competitive computation times across 900 simulations on a modified BARN dataset for autonomous navigation. GitHub: https://github.com/i-ASL/BiC-MPPI<|reference_end|>
arxiv
@article{jung2024bic-mppi:, title={BiC-MPPI: Goal-Pursuing, Sampling-Based Bidirectional Rollout Clustering Path Integral for Trajectory Optimization}, author={Minchan Jung and Kwangki Kim}, journal={arXiv preprint arXiv:2410.06493}, year={2024}, archivePrefix={arXiv}, eprint={2410.06493}, primaryClass={cs.RO cs.AI cs.SY eess.SY math.OC} }
jung2024bic-mppi:
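For readers unfamiliar with the MPPI framework that BiC-MPPI extends, the core update is an importance-weighted average of sampled control perturbations (a standard sketch; the paper's bidirectional rollouts, clustering, and guide cost are not reproduced here):

```python
import numpy as np

def mppi_update(nominal_u, noise, costs, lam=1.0):
    """nominal_u: (T, m) controls; noise: (K, T, m) sampled perturbations; costs: (K,)."""
    # Softmax of negative rollout costs; subtract the min for numerical stability.
    w = np.exp(-(costs - costs.min()) / lam)
    w /= w.sum()
    # Shift the nominal control sequence by the weighted perturbation.
    return nominal_u + np.tensordot(w, noise, axes=1)
```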
arxiv-667329
2410.06494
Conformal Prediction: A Data Perspective
<|reference_start|>Conformal Prediction: A Data Perspective: Conformal prediction (CP), a distribution-free uncertainty quantification (UQ) framework, reliably provides valid predictive inference for black-box models. CP constructs prediction sets that contain the true output with a specified probability. However, the diverse modalities of modern data science, along with increasing data and model complexity, challenge traditional CP methods. These developments have spurred novel approaches to address evolving scenarios. This survey reviews the foundational concepts of CP and recent advancements from a data-centric perspective, including applications to structured, unstructured, and dynamic data. We also discuss the challenges and opportunities CP faces in large-scale data and models.<|reference_end|>
arxiv
@article{zhou2024conformal, title={Conformal Prediction: A Data Perspective}, author={Xiaofan Zhou, Baiting Chen, Yu Gui, Lu Cheng}, journal={arXiv preprint arXiv:2410.06494}, year={2024}, archivePrefix={arXiv}, eprint={2410.06494}, primaryClass={cs.LG} }
zhou2024conformal
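The basic construction this survey starts from is split conformal prediction: calibrate a score threshold on held-out data so the resulting prediction set covers the truth with probability at least $1-\alpha$. A textbook sketch for regression (not code from the paper):

```python
import numpy as np

def split_conformal_interval(residuals_cal, y_pred_test, alpha=0.1):
    """residuals_cal: |y - yhat| on a calibration set; returns (lower, upper) arrays."""
    n = len(residuals_cal)
    # Finite-sample-corrected quantile level ceil((n+1)(1-alpha))/n.
    q_level = np.ceil((n + 1) * (1 - alpha)) / n
    qhat = np.quantile(residuals_cal, min(q_level, 1.0))
    return y_pred_test - qhat, y_pred_test + qhat
```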
arxiv-667330
2410.06496
On the Similarity of Circuits across Languages: a Case Study on the Subject-verb Agreement Task
<|reference_start|>On the Similarity of Circuits across Languages: a Case Study on the Subject-verb Agreement Task: Several algorithms implemented by language models have recently been successfully reverse-engineered. However, these findings have been concentrated on specific tasks and models, leaving it unclear how universal circuits are across different settings. In this paper, we study the circuits implemented by Gemma 2B for solving the subject-verb agreement task across two different languages, English and Spanish. We discover that both circuits are highly consistent, being mainly driven by a particular attention head writing a `subject number' signal to the last residual stream, which is read by a small set of neurons in the final MLPs. Notably, this subject number signal is represented as a direction in the residual stream space, and is language-independent. We demonstrate that this direction has a causal effect on the model predictions, effectively flipping the Spanish predicted verb number by intervening with the direction found in English.<|reference_end|> Finally, we present evidence of similar behavior in other models within the Gemma 1 and Gemma 2 families.
arxiv
@article{ferrando2024on, title={On the Similarity of Circuits across Languages: a Case Study on the Subject-verb Agreement Task}, author={Javier Ferrando, Marta R. Costa-jussà}, journal={arXiv preprint arXiv:2410.06496}, year={2024}, archivePrefix={arXiv}, eprint={2410.06496}, primaryClass={cs.CL} }
ferrando2024on
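The causal intervention described above amounts to adding a fixed direction to the final-position residual stream. A toy version of that operation (an assumption-laden sketch; the actual hook point, scale, and direction come from the paper's analysis):

```python
import numpy as np

def intervene(resid_last, direction, alpha=5.0):
    """resid_last: (d,) residual stream at the final token position."""
    d = direction / np.linalg.norm(direction)   # 'subject number' direction
    return resid_last + alpha * d               # push activations along it to flip the verb number
```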
arxiv-667331
2410.06497
ERCache: An Efficient and Reliable Caching Framework for Large-Scale User Representations in Meta's Ads System
<|reference_start|>ERCache: An Efficient and Reliable Caching Framework for Large-Scale User Representations in Meta's Ads System: The increasing complexity of deep learning models used for calculating user representations presents significant challenges, particularly with limited computational resources and strict service-level agreements (SLAs). Previous research efforts have focused on optimizing model inference but have overlooked a critical question: is it necessary to perform user model inference for every ad request in large-scale social networks? To address this question and these challenges, we first analyze user access patterns at Meta and find that most user model inferences occur within a short timeframe. This observation reveals a triangular relationship among model complexity, embedding freshness, and service SLAs. Building on this insight, we designed, implemented, and evaluated ERCache, an efficient and robust caching framework for large-scale user representations in ads recommendation systems on social networks. ERCache categorizes cache into direct and failover types and applies customized settings and eviction policies for each model, effectively balancing model complexity, embedding freshness, and service SLAs, even considering the staleness introduced by caching. ERCache has been deployed at Meta for over six months, supporting more than 30 ranking models while efficiently conserving computational resources and complying with service SLA requirements.<|reference_end|>
arxiv
@article{zhou2024ercache:, title={ERCache: An Efficient and Reliable Caching Framework for Large-Scale User Representations in Meta's Ads System}, author={Fang Zhou, Yaning Huang, Dong Liang, Dai Li, Zhongke Zhang, Kai Wang, Xiao Xin, Abdallah Aboelela, Zheliang Jiang, Yang Wang, Jeff Song, Wei Zhang, Chen Liang, Huayu Li, ChongLin Sun, Hang Yang, Lei Qu, Zhan Shu, Mindi Yuan, Emanuele Maccherani, Taha Hayat, John Guo, Varna Puvvada, and Uladzimir Pashkevich}, journal={arXiv preprint arXiv:2410.06497}, year={2024}, archivePrefix={arXiv}, eprint={2410.06497}, primaryClass={cs.IR cs.AI cs.DC cs.LG} }
zhou2024ercache:
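The direct/failover split described in the ERCache abstract can be sketched as a toy two-tier TTL cache: a fresh entry is served directly, and a stale copy is served only as a fallback when inference would miss its SLA. Everything here is illustrative (class name, TTLs, the SLA-timeout flag); Meta's production system is far more elaborate.

```python
import time

class UserEmbeddingCache:
    """Toy two-tier cache for user embeddings. Per-model TTLs and the SLA
    hook are invented for illustration, not Meta's production settings."""

    def __init__(self, direct_ttl_s: float, failover_ttl_s: float):
        self.direct_ttl = direct_ttl_s
        self.failover_ttl = failover_ttl_s
        self.store = {}  # user_id -> (embedding, write_time)

    def put(self, user_id, embedding):
        self.store[user_id] = (embedding, time.time())

    def get(self, user_id, inference_timed_out: bool = False):
        entry = self.store.get(user_id)
        if entry is None:
            return None                      # miss: run model inference
        embedding, t = entry
        age = time.time() - t
        if age <= self.direct_ttl:
            return embedding                 # fresh enough to serve directly
        if inference_timed_out and age <= self.failover_ttl:
            return embedding                 # stale, but better than an SLA miss
        return None                          # too stale: recompute
```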
arxiv-667332
2410.06502
Chemistry-Inspired Diffusion with Non-Differentiable Guidance
<|reference_start|>Chemistry-Inspired Diffusion with Non-Differentiable Guidance: Recent advances in diffusion models have shown remarkable potential in the conditional generation of novel molecules. These models can be guided in two ways: (i) explicitly, through additional features representing the condition, or (ii) implicitly, using a property predictor. However, training property predictors or conditional diffusion models requires an abundance of labeled data and is inherently challenging in real-world applications. We propose a novel approach that attenuates the limitations of acquiring large labeled datasets by leveraging domain knowledge from quantum chemistry as a non-differentiable oracle to guide an unconditional diffusion model. Instead of relying on neural networks, the oracle provides accurate guidance in the form of estimated gradients, allowing the diffusion process to sample from a conditional distribution specified by quantum chemistry. We show that this results in more precise conditional generation of novel and stable molecular structures. Our experiments demonstrate that our method: (1) significantly reduces atomic forces, enhancing the validity of generated molecules when used for stability optimization; (2) is compatible with both explicit and implicit guidance in diffusion models, enabling joint optimization of molecular properties and stability; and (3) generalizes effectively to molecular optimization tasks beyond stability optimization.<|reference_end|>
arxiv
@article{shen2024chemistry-inspired, title={Chemistry-Inspired Diffusion with Non-Differentiable Guidance}, author={Yuchen Shen, Chenhao Zhang, Sijie Fu, Chenghui Zhou, Newell Washburn, Barnabás Póczos}, journal={arXiv preprint arXiv:2410.06502}, year={2024}, archivePrefix={arXiv}, eprint={2410.06502}, primaryClass={cs.LG cs.AI} }
shen2024chemistry-inspired
arxiv-667333
2410.06504
Transformer-assisted Parametric CSI Feedback for mmWave Massive MIMO Systems
<|reference_start|>Transformer-assisted Parametric CSI Feedback for mmWave Massive MIMO Systems: As a key technology to meet the ever-increasing data rate demand in beyond 5G and 6G communications, millimeter-wave (mmWave) massive multiple-input multiple-output (MIMO) systems have gained much attention recently. To make the most of mmWave massive MIMO systems, acquisition of accurate channel state information (CSI) at the base station (BS) is crucial. However, this task is by no means easy due to the CSI feedback overhead induced by the large number of antennas. In this paper, we propose a parametric CSI feedback technique for mmWave massive MIMO systems. The key idea of the proposed technique is to compress the mmWave MIMO channel matrix into a few geometric channel parameters (e.g., angles, delays, and path gains). Due to the limited scattering of the mmWave signal, the number of channel parameters is much smaller than the number of antennas, thereby reducing the CSI feedback overhead significantly. Moreover, by exploiting the deep learning (DL) technique for the channel parameter extraction and the MIMO channel reconstruction, we can effectively suppress the channel quantization error. From the numerical results, we demonstrate that the proposed technique outperforms the conventional CSI feedback techniques in terms of normalized mean square error (NMSE) and bit error rate (BER).<|reference_end|>
arxiv
@article{ju2024transformer-assisted, title={Transformer-assisted Parametric CSI Feedback for mmWave Massive MIMO Systems}, author={Hyungyu Ju, Seokhyun Jeong, Seungnyun Kim, Byungju Lee and Byonghyo Shim}, journal={arXiv preprint arXiv:2410.06504}, year={2024}, archivePrefix={arXiv}, eprint={2410.06504}, primaryClass={cs.IT eess.SP math.IT} }
ju2024transformer-assisted
arxiv-667334
2410.06506
Cooperative Multi-Target Positioning for Cell-Free Massive MIMO with Multi-Agent Reinforcement Learning
<|reference_start|>Cooperative Multi-Target Positioning for Cell-Free Massive MIMO with Multi-Agent Reinforcement Learning: Cell-free massive multiple-input multiple-output (mMIMO) is a promising technology to empower next-generation mobile communication networks. In this paper, to address the computational complexity associated with conventional fingerprint positioning, we consider a novel cooperative positioning architecture that involves certain relevant access points (APs) to establish positioning similarity coefficients. Then, we propose an innovative joint positioning and correction framework employing multi-agent reinforcement learning (MARL) to tackle the challenges of high-dimensional sophisticated signal processing, which mainly leverages the received signal strength information for preliminary positioning, supplemented by the angle of arrival information to refine the initial position estimation. Moreover, to mitigate the bias effects originating from remote APs, we design a cooperative weighted K-nearest neighbor (Co-WKNN)-based estimation scheme to select APs with a high correlation to participate in user positioning. In the numerical results, we present comparisons of various user positioning schemes, which reveal that the proposed MARL-based positioning scheme with Co-WKNN can effectively improve positioning performance. It is important to note that the cooperative positioning architecture is a critical element in striking a balance between positioning performance and computational complexity.<|reference_end|>
arxiv
@article{liu2024cooperative, title={Cooperative Multi-Target Positioning for Cell-Free Massive MIMO with Multi-Agent Reinforcement Learning}, author={Ziheng Liu, Jiayi Zhang, Enyu Shi, Yiyang Zhu, Derrick Wing Kwan Ng, and Bo Ai}, journal={arXiv preprint arXiv:2410.06506}, year={2024}, archivePrefix={arXiv}, eprint={2410.06506}, primaryClass={cs.IT eess.SP math.IT} }
liu2024cooperative
arxiv-667335
2410.06508
Towards Self-Improvement of LLMs via MCTS: Leveraging Stepwise Knowledge with Curriculum Preference Learning
<|reference_start|>Towards Self-Improvement of LLMs via MCTS: Leveraging Stepwise Knowledge with Curriculum Preference Learning: Monte Carlo Tree Search (MCTS) has recently emerged as a powerful technique for enhancing the reasoning capabilities of LLMs. Techniques such as SFT or DPO have enabled LLMs to distill high-quality behaviors from MCTS, improving their reasoning performance. However, existing distillation methods underutilize the rich trajectory information generated by MCTS, limiting the potential for improvements in LLM reasoning. In this paper, we propose AlphaLLM-CPL, a novel pairwise training framework that enables LLMs to self-improve through MCTS behavior distillation. AlphaLLM-CPL efficiently leverages MCTS trajectories via two key innovations: (1) AlphaLLM-CPL constructs stepwise trajectory pairs from child nodes sharing the same parent in the search tree, providing step-level information for more effective MCTS behavior distillation. (2) AlphaLLM-CPL introduces curriculum preference learning, dynamically adjusting the training sequence of trajectory pairs in each offline training epoch to prioritize critical learning steps and mitigate overfitting. Experimental results on mathematical reasoning tasks demonstrate that AlphaLLM-CPL significantly outperforms previous MCTS behavior distillation methods, substantially boosting the reasoning capabilities of LLMs.<|reference_end|>
arxiv
@article{wang2024towards, title={Towards Self-Improvement of LLMs via MCTS: Leveraging Stepwise Knowledge with Curriculum Preference Learning}, author={Xiyao Wang, Linfeng Song, Ye Tian, Dian Yu, Baolin Peng, Haitao Mi, Furong Huang and Dong Yu}, journal={arXiv preprint arXiv:2410.06508}, year={2024}, archivePrefix={arXiv}, eprint={2410.06508}, primaryClass={cs.LG cs.CL} }
wang2024towards
arxiv-667336
2410.06509
PFAttack: Stealthy Attack Bypassing Group Fairness in Federated Learning
<|reference_start|>PFAttack: Stealthy Attack Bypassing Group Fairness in Federated Learning: Federated learning (FL), integrating group fairness mechanisms, allows multiple clients to collaboratively train a global model that makes unbiased decisions for different populations grouped by sensitive attributes (e.g., gender and race). Due to its distributed nature, previous studies have demonstrated that FL systems are vulnerable to model poisoning attacks. However, these studies primarily focus on perturbing accuracy, leaving a critical question unexplored: Can an attacker bypass the group fairness mechanisms in FL and manipulate the global model to be biased? The motivations for such an attack vary: an attacker might seek higher accuracy, since fairness considerations typically limit the accuracy of the global model, or aim to cause ethical disruption. To address this question, we design a novel form of attack in FL, termed Profit-driven Fairness Attack (PFATTACK), which aims not to degrade global model accuracy but to bypass fairness mechanisms. Our fundamental insight is that group fairness seeks to weaken the dependence of outputs on input attributes related to sensitive information. In the proposed PFATTACK, an attacker can recover this dependence through local fine-tuning across various sensitive groups, thereby creating a biased yet accuracy-preserving malicious model and injecting it into FL through model replacement. Compared to attacks targeting accuracy, PFATTACK is more stealthy. The malicious model in PFATTACK exhibits subtle parameter variations relative to the original global model, making it robust against detection and filtering by Byzantine-resilient aggregations. Extensive experiments on benchmark datasets are conducted for four fair FL frameworks and three Byzantine-resilient aggregations against model poisoning, demonstrating the effectiveness and stealth of PFATTACK in bypassing group fairness mechanisms in FL.<|reference_end|>
arxiv
@article{gao2024pfattack:, title={PFAttack: Stealthy Attack Bypassing Group Fairness in Federated Learning}, author={Jiashi Gao, Ziwei Wang, Xiangyu Zhao, Xin Yao, Xuetao Wei}, journal={arXiv preprint arXiv:2410.06509}, year={2024}, archivePrefix={arXiv}, eprint={2410.06509}, primaryClass={cs.LG} }
gao2024pfattack:
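The PFATTACK abstract says the biased model is injected "through model replacement". A minimal sketch of the generic model-replacement step under plain FedAvg is below; the scaling rule is the standard one from the model-poisoning literature, and the function and variable names are assumptions, not the paper's implementation.

```python
def model_replacement_update(global_params: dict, malicious_params: dict,
                             n_clients: int) -> dict:
    """Craft an update so that FedAvg over n_clients lands (approximately)
    on the attacker's model. Assumes plain FedAvg with equal client weights
    and that honest clients send back roughly the current global model."""
    return {
        name: n_clients * (malicious_params[name] - g) + g
        for name, g in global_params.items()
    }

# toy check with scalar "parameters"
G = {"w": 1.0}                     # current global model
X = {"w": 3.0}                     # attacker's biased, accuracy-preserving model
update = model_replacement_update(G, X, n_clients=10)
# with 9 honest clients sending G, FedAvg yields (9*1.0 + update["w"]) / 10 == 3.0
assert (9 * G["w"] + update["w"]) / 10 == X["w"]
```

Because the malicious model itself stays close to the global model (only its dependence on sensitive attributes is restored), the scaled update remains hard for Byzantine-resilient aggregation to flag.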
arxiv-667337
2410.06511
TorchTitan: One-stop PyTorch native solution for production ready LLM pre-training
<|reference_start|>TorchTitan: One-stop PyTorch native solution for production ready LLM pre-training: The development of large language models (LLMs) has been instrumental in advancing state-of-the-art natural language processing applications. Training LLMs with billions of parameters and trillions of tokens requires sophisticated distributed systems that enable composing and comparing several state-of-the-art techniques in order to efficiently scale across thousands of accelerators. However, existing solutions are complex, scattered across multiple libraries/repositories, lack interoperability, and are cumbersome to maintain. Thus, curating and empirically comparing training recipes require non-trivial engineering effort. This paper introduces TorchTitan, an open-source, PyTorch-native distributed training system that unifies state-of-the-art techniques, streamlining integration and reducing overhead. TorchTitan enables 3D parallelism in a modular manner with elastic scaling, providing comprehensive logging, checkpointing, and debugging tools for production-ready training. It also incorporates hardware-software co-designed solutions, leveraging features like Float8 training and SymmetricMemory. As a flexible test bed, TorchTitan facilitates custom recipe curation and comparison, allowing us to develop optimized training recipes for Llama 3.1 and provide guidance on selecting techniques for maximum efficiency based on our experiences. We thoroughly assess TorchTitan on the Llama 3.1 family of LLMs, spanning 8 billion to 405 billion parameters, and showcase its exceptional performance, modular composability, and elastic scalability. By stacking training optimizations, we demonstrate accelerations of 65.08% with 1D parallelism at the 128-GPU scale (Llama 3.1 8B), an additional 12.59% with 2D parallelism at the 256-GPU scale (Llama 3.1 70B), and an additional 30% with 3D parallelism at the 512-GPU scale (Llama 3.1 405B) on NVIDIA H100 GPUs over optimized baselines.<|reference_end|>
arxiv
@article{liang2024torchtitan:, title={TorchTitan: One-stop PyTorch native solution for production ready LLM pre-training}, author={Wanchao Liang, Tianyu Liu, Less Wright, Will Constable, Andrew Gu, Chien-Chin Huang, Iris Zhang, Wei Feng, Howard Huang, Junjie Wang, Sanket Purandare, Gokul Nadathur and Stratos Idreos}, journal={arXiv preprint arXiv:2410.06511}, year={2024}, archivePrefix={arXiv}, eprint={2410.06511}, primaryClass={cs.CL cs.AI cs.DC cs.LG} }
liang2024torchtitan:
arxiv-667338
2410.06512
In-Band Full-Duplex MIMO Systems for Simultaneous Communications and Sensing: Challenges, Methods, and Future Perspectives
<|reference_start|>In-Band Full-Duplex MIMO Systems for Simultaneous Communications and Sensing: Challenges, Methods, and Future Perspectives: In-band Full-Duplex (FD) Multiple-Input Multiple-Output (MIMO) systems offer a significant opportunity for Integrated Sensing and Communications (ISAC) due to their capability to realize simultaneous signal transmissions and receptions. This feature has been recently exploited to devise spectrum-efficient simultaneous information transmission and monostatic sensing operations, a line of research typically referred to as MIMO FD-ISAC. In this article, capitalizing on a recent FD MIMO architecture with reduced complexity analog cancellation, we present an FD-enabled framework for simultaneous communications and sensing using data signals. In contrast to communications applications, the framework's goal is not to mitigate self interference, since it includes reflections of the downlink data transmissions from targets in the FD node's vicinity, but to optimize the system parameters for the intended dual functionality. The unique characteristics and challenges of a generic MIMO FD-ISAC system are discussed along with a broad overview of state-of-the-art special cases, including numerical investigations. Several directions for future work on FD-enabled ISAC relevant to signal processing communities are also provided.<|reference_end|>
arxiv
@article{smida2024in-band, title={In-Band Full-Duplex MIMO Systems for Simultaneous Communications and Sensing: Challenges, Methods, and Future Perspectives}, author={Besma Smida and George C. Alexandropoulos and Taneli Riihonen and Md Atiqul Islam}, journal={arXiv preprint arXiv:2410.06512}, year={2024}, archivePrefix={arXiv}, eprint={2410.06512}, primaryClass={cs.IT cs.ET eess.SP math.IT} }
smida2024in-band
arxiv-667339
2410.06513
MotionRL: Align Text-to-Motion Generation to Human Preferences with Multi-Reward Reinforcement Learning
<|reference_start|>MotionRL: Align Text-to-Motion Generation to Human Preferences with Multi-Reward Reinforcement Learning: We introduce MotionRL, the first approach to utilize Multi-Reward Reinforcement Learning (RL) for optimizing text-to-motion generation tasks and aligning them with human preferences. Previous works focused on improving numerical performance metrics on the given datasets, often neglecting the variability and subjectivity of human feedback. In contrast, our novel approach uses reinforcement learning to fine-tune the motion generator based on human-preference prior knowledge from the human perception model, allowing it to generate motions that better align with human preferences. In addition, MotionRL introduces a novel multi-objective optimization strategy to approximate Pareto optimality between text adherence, motion quality, and human preferences. Extensive experiments and user studies demonstrate that MotionRL not only allows control over the generated results across different objectives but also significantly enhances performance across these metrics compared to other algorithms.<|reference_end|>
arxiv
@article{liu2024motionrl:, title={MotionRL: Align Text-to-Motion Generation to Human Preferences with Multi-Reward Reinforcement Learning}, author={Xiaoyang Liu, Yunyao Mao, Wengang Zhou, Houqiang Li}, journal={arXiv preprint arXiv:2410.06513}, year={2024}, archivePrefix={arXiv}, eprint={2410.06513}, primaryClass={cs.CV} }
liu2024motionrl:
arxiv-667340
2410.06514
MORSE: An Efficient Homomorphic Secret Sharing Scheme Enabling Non-Linear Operation
<|reference_start|>MORSE: An Efficient Homomorphic Secret Sharing Scheme Enabling Non-Linear Operation: Homomorphic secret sharing (HSS) enables two servers to locally perform functions on encrypted data directly and obtain the results in the form of shares. A Paillier-based HSS solution seamlessly achieves multiplicative homomorphism and incurs lower communication costs. Unfortunately, existing Paillier-based HSS schemes suffer from a large private key size, potential calculation error, expensive computation and storage overhead, and are only valid for linear operations (e.g., addition and multiplication). To this end, inspired by the Paillier cryptosystem with fast encryption and decryption, we propose MORSE, an efficient homomorphic secret sharing scheme enabling non-linear operation, which enjoys a small key size, no calculation error and low overhead. In terms of functions, MORSE supports addition, subtraction, multiplication, scalar-multiplication, and comparison. Particularly, we carefully design two conversion protocols achieving the mutual conversion between one Paillier ciphertext and two secret shares, which allows MORSE to continuously perform the above operations. Rigorous analyses demonstrate that MORSE securely outputs correct results. Experimental results show that MORSE makes a runtime improvement of up to 9.3 times in terms of secure multiplication, and a communication cost reduction of up to 16.6% in secure comparison, compared to the state-of-the-art.<|reference_end|>
arxiv
@article{deng2024morse:, title={MORSE: An Efficient Homomorphic Secret Sharing Scheme Enabling Non-Linear Operation}, author={Weiquan Deng, Bowen Zhao, Yang Xiao, Yantao Zhong, Qingqi Pei, Ximeng Liu}, journal={arXiv preprint arXiv:2410.06514}, year={2024}, archivePrefix={arXiv}, eprint={2410.06514}, primaryClass={cs.CR} }
deng2024morse:
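The share arithmetic MORSE builds on can be sketched with plain two-party additive secret sharing: each server holds one share and performs linear operations locally, without interaction. The modulus and helper names below are illustrative; MORSE's Paillier encryption, its two conversion protocols, and secure comparison are deliberately omitted.

```python
import secrets

N = 2**64  # toy modulus; in MORSE this would relate to the Paillier modulus

def share(x: int) -> tuple[int, int]:
    """Split x into two additive shares modulo N."""
    r = secrets.randbelow(N)
    return r, (x - r) % N

def reconstruct(s0: int, s1: int) -> int:
    return (s0 + s1) % N

# Local (non-interactive) operations each server performs on its own share:
def add(a: int, b: int) -> int:        return (a + b) % N   # share of x + y
def sub(a: int, b: int) -> int:        return (a - b) % N   # share of x - y
def scalar_mul(c: int, a: int) -> int: return (c * a) % N   # share of c * x

x0, x1 = share(7)
y0, y1 = share(35)
assert reconstruct(add(x0, y0), add(x1, y1)) == 42
assert reconstruct(scalar_mul(3, x0), scalar_mul(3, x1)) == 21
```

Multiplication of two shared values and comparison are where the scheme's Paillier machinery and conversion protocols come in; those steps cannot be done locally in this toy form.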
arxiv-667341
2410.06515
Studying Practitioners' Expectations on Clear Code Review Comments
<|reference_start|>Studying Practitioners' Expectations on Clear Code Review Comments: The code review comment (CRC) is pivotal in the process of modern code review. It provides reviewers with the opportunity to identify potential bugs, offer constructive feedback, and suggest improvements. Clear and concise code review comments (CRCs) facilitate communication between developers and are crucial to the correct understanding of the issues identified and proposed solutions. Despite the importance of CRCs' clarity, there is still a lack of guidelines on what constitutes good clarity and how to evaluate it. In this paper, we conduct a comprehensive study on understanding and evaluating the clarity of CRCs. We first derive a set of attributes related to the clarity of CRCs, namely RIE attributes (i.e., Relevance, Informativeness, and Expression), as well as their corresponding evaluation criteria based on our literature review and survey with practitioners. We then investigate the clarity of CRCs in open-source projects written in nine programming languages and find that a large portion (i.e., 28.8%) of the CRCs lack clarity in at least one of the attributes. Finally, we propose ClearCRC, an automated framework that evaluates the clarity of CRCs. Experimental results show that ClearCRC can effectively evaluate the clarity of CRCs and outperform the baselines.<|reference_end|>
arxiv
@article{li2024studying, title={Studying Practitioners' Expectations on Clear Code Review Comments}, author={Zhenhao Li, Junkai Chen, Qiheng Mao, Xing Hu, Kui Liu, Xin Xia}, journal={arXiv preprint arXiv:2410.06515}, year={2024}, archivePrefix={arXiv}, eprint={2410.06515}, primaryClass={cs.SE} }
li2024studying
arxiv-667342
2410.06516
QuadBEV: An Efficient Quadruple-Task Perception Framework via Bird's-Eye-View Representation
<|reference_start|>QuadBEV: An Efficient Quadruple-Task Perception Framework via Bird's-Eye-View Representation: Bird's-Eye-View (BEV) perception has become a vital component of autonomous driving systems due to its ability to integrate multiple sensor inputs into a unified representation, enhancing performance in various downstream tasks. However, the computational demands of BEV models pose challenges for real-world deployment in vehicles with limited resources. To address these limitations, we propose QuadBEV, an efficient multitask perception framework that leverages the shared spatial and contextual information across four key tasks: 3D object detection, lane detection, map segmentation, and occupancy prediction. QuadBEV not only streamlines the integration of these tasks using a shared backbone and task-specific heads but also addresses common multitask learning challenges such as learning rate sensitivity and conflicting task objectives. Our framework reduces redundant computations, thereby enhancing system efficiency, making it particularly suited for embedded systems. We present comprehensive experiments that validate the effectiveness and robustness of QuadBEV, demonstrating its suitability for real-world applications.<|reference_end|>
arxiv
@article{li2024quadbev:, title={QuadBEV: An Efficient Quadruple-Task Perception Framework via Bird's-Eye-View Representation}, author={Yuxin Li, Yiheng Li, Xulei Yang, Mengying Yu, Zihang Huang, Xiaojun Wu, Chai Kiat Yeo}, journal={arXiv preprint arXiv:2410.06516}, year={2024}, archivePrefix={arXiv}, eprint={2410.06516}, primaryClass={cs.RO cs.AI} }
li2024quadbev:
arxiv-667343
2410.06519
SEGMENT+: Long Text Processing with Short-Context Language Models
<|reference_start|>SEGMENT+: Long Text Processing with Short-Context Language Models: There is a growing interest in expanding the input capacity of language models (LMs) across various domains. However, simply increasing the context window does not guarantee robust performance across diverse long-input processing tasks, such as understanding extensive documents and extracting detailed information from lengthy and noisy data. In response, we introduce SEGMENT+, a general framework that enables LMs to handle extended inputs within limited context windows efficiently. SEGMENT+ utilizes structured notes and a filtering module to manage information flow, resulting in a system that is both controllable and interpretable. Our extensive experiments across various model sizes, focusing on long-document question-answering and Needle-in-a-Haystack tasks, demonstrate the effectiveness of SEGMENT+ in improving performance.<|reference_end|>
arxiv
@article{shi2024segment+:, title={SEGMENT+: Long Text Processing with Short-Context Language Models}, author={Wei Shi, Shuang Li, Kerun Yu, Jinglei Chen, Zujie Liang, Xinhui Wu, Yuxi Qian, Feng Wei, Bo Zheng, Jiaqing Liang, Jiangjie Chen, Yanghua Xiao}, journal={arXiv preprint arXiv:2410.06519}, year={2024}, archivePrefix={arXiv}, eprint={2410.06519}, primaryClass={cs.CL} }
shi2024segment+:
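A minimal sketch of the segment-note-filter loop the SEGMENT+ abstract describes is given below, assuming a placeholder `llm(prompt) -> str` callable. The prompts, window size, and note format are invented for illustration and are not SEGMENT+'s actual design.

```python
def segment_plus(document: str, question: str, llm, window: int = 2000) -> str:
    """Sketch of answering over a long input with a short-context model.
    `llm` is any prompt-in/text-out callable; all prompts are illustrative."""
    # 1) split the document into windows the short-context model can handle
    segments = [document[i:i + window] for i in range(0, len(document), window)]

    # 2) take a structured note on each segment
    notes = [llm(f"Summarize facts relevant to '{question}':\n{seg}")
             for seg in segments]

    # 3) filter notes, keeping only those the model judges relevant
    kept = [note for note in notes
            if llm(f"Is this note relevant to '{question}'? Answer yes/no.\n{note}")
                 .strip().lower().startswith("yes")]

    # 4) answer from the filtered notes only, never the full document
    return llm(f"Answer '{question}' using these notes:\n" + "\n".join(kept))
```

The filtering step is what keeps the final prompt inside the model's context window even when the raw document is orders of magnitude longer.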
arxiv-667344
2410.06520
A Novel LLM-based Two-stage Summarization Approach for Long Dialogues
<|reference_start|>A Novel LLM-based Two-stage Summarization Approach for Long Dialogues: Long document summarization poses a significant challenge in natural language processing due to input lengths that exceed the capacity of most state-of-the-art pre-trained language models. This study proposes a hierarchical framework that segments and condenses information from long documents, subsequently fine-tuning the processed text with an abstractive summarization model. Unsupervised topic segmentation methods identify semantically appropriate breakpoints. The condensation stage utilizes an unsupervised generation model to generate condensed data, and our current experiments employ ChatGPT (v3.5). The summarization stage fine-tunes the abstractive summarization model on the condensed data to generate the final results. This framework enables long documents to be processed on models even when the document length exceeds the model's maximum input size. The exclusion of the entire document from the summarization model reduces the time and computational resources required for training, making the framework suitable for contexts with constrained local computational resources.<|reference_end|>
arxiv
@article{yin2024a, title={A Novel LLM-based Two-stage Summarization Approach for Long Dialogues}, author={Yuan-Jhe Yin, Bo-Yu Chen, Berlin Chen}, journal={arXiv preprint arXiv:2410.06520}, year={2024}, archivePrefix={arXiv}, eprint={2410.06520}, primaryClass={cs.CL} }
yin2024a
arxiv-667345
2410.06521
Real-to-Sim Grasp: Rethinking the Gap between Simulation and Real World in Grasp Detection
<|reference_start|>Real-to-Sim Grasp: Rethinking the Gap between Simulation and Real World in Grasp Detection: For 6-DoF grasp detection, simulated data is expandable to train more powerful model, but it faces the challenge of the large gap between simulation and real world. Previous works bridge this gap with a sim-to-real way. However, this way explicitly or implicitly forces the simulated data to adapt to the noisy real data when training grasp detectors, where the positional drift and structural distortion within the camera noise will harm the grasp learning. In this work, we propose a Real-to-Sim framework for 6-DoF Grasp detection, named R2SGrasp, with the key insight of bridging this gap in a real-to-sim way, which directly bypasses the camera noise in grasp detector training through an inference-time real-to-sim adaption. To achieve this real-to-sim adaptation, our R2SGrasp designs the Real-to-Sim Data Repairer (R2SRepairer) to mitigate the camera noise of real depth maps in data-level, and the Real-to-Sim Feature Enhancer (R2SEnhancer) to enhance real features with precise simulated geometric primitives in feature-level. To endow our framework with the generalization ability, we construct a large-scale simulated dataset cost-efficiently to train our grasp detector, which includes 64,000 RGB-D images with 14.4 million grasp annotations. Sufficient experiments show that R2SGrasp is powerful and our real-to-sim perspective is effective. The real-world experiments further show great generalization ability of R2SGrasp. Project page is available on https://isee-laboratory.github.io/R2SGrasp.<|reference_end|>
arxiv
@article{cai2024real-to-sim, title={Real-to-Sim Grasp: Rethinking the Gap between Simulation and Real World in Grasp Detection}, author={Jia-Feng Cai, Zibo Chen, Xiao-Ming Wu, Jian-Jian Jiang, Yi-Lin Wei, Wei-Shi Zheng}, journal={arXiv preprint arXiv:2410.06521}, year={2024}, archivePrefix={arXiv}, eprint={2410.06521}, primaryClass={cs.RO} }
cai2024real-to-sim
arxiv-667346
2410.06522
On the Security of Bitstream-level JPEG Encryption with Restart Markers
<|reference_start|>On the Security of Bitstream-level JPEG Encryption with Restart Markers: This paper aims to evaluate the security of a bitstream-level JPEG encryption method using restart (RST) markers, where the encrypted image keeps the JPEG file format with the same file size as the non-encrypted image. Data encrypted using this method can be decoded without altering header information by employing a standard JPEG decoder. Moreover, the use of RST markers enables the definition of extended blocks divided by the markers, so spatially partial encryption and block-permutation-based encryption can be carried out. However, the security of the method was evaluated only with respect to key space analysis for brute-force attacks and other limited attacks. Accordingly, in this paper, we evaluate the security of the method with respect to robustness against ciphertext-only attacks including state-of-the-art attacks. In experiments, the method is compared with conventional encryption methods, and it is confirmed to be robust against ciphertext-only attacks if parameters used for image encryption are carefully chosen.<|reference_end|>
arxiv
@article{hirose2024on, title={On the Security of Bitstream-level JPEG Encryption with Restart Markers}, author={Mare Hirose, Shoko Imaizumi, Hitoshi Kiya}, journal={arXiv preprint arXiv:2410.06522}, year={2024}, archivePrefix={arXiv}, eprint={2410.06522}, primaryClass={cs.CR} }
hirose2024on
arxiv-667347
2410.06523
Phase Diagram from Nonlinear Interaction between Superconducting Order and Density: Toward Data-Based Holographic Superconductor
<|reference_start|>Phase Diagram from Nonlinear Interaction between Superconducting Order and Density: Toward Data-Based Holographic Superconductor: We address an inverse problem in modeling holographic superconductors. We focus our research on the critical temperature behavior depicted by experiments. We use a physics-informed neural network method to find a mass function $M(F^2)$, which is necessary to understand phase transition behavior. This mass function describes a nonlinear interaction between superconducting order and charge carrier density. We introduce positional embedding layers to improve the learning process in our algorithm, and the Adam optimization is used to predict the critical temperature data via holographic calculation with appropriate accuracy. Consideration of the positional embedding layers is motivated by the transformer model of natural-language processing in the artificial intelligence (AI) field. We obtain holographic models that reproduce borderlines of the normal and superconducting phases provided by actual data. Our work is the first holographic attempt to match phase transition data quantitatively obtained from experiments. Also, the present work offers a new methodology for data-based holographic models.<|reference_end|>
arxiv
@article{kim2024phase, title={Phase Diagram from Nonlinear Interaction between Superconducting Order and Density: Toward Data-Based Holographic Superconductor}, author={Sejin Kim, Kyung Kiu Kim, and Yunseok Seo}, journal={arXiv preprint arXiv:2410.06523}, year={2024}, archivePrefix={arXiv}, eprint={2410.06523}, primaryClass={hep-th cond-mat.dis-nn cond-mat.supr-con cs.AI} }
kim2024phase
arxiv-667348
2410.06524
Do great minds think alike? Investigating Human-AI Complementarity in Question Answering with CAIMIRA
<|reference_start|>Do great minds think alike? Investigating Human-AI Complementarity in Question Answering with CAIMIRA: Recent advancements of large language models (LLMs) have led to claims of AI surpassing humans in natural language processing (NLP) tasks such as textual understanding and reasoning. This work investigates these assertions by introducing CAIMIRA, a novel framework rooted in item response theory (IRT) that enables quantitative assessment and comparison of problem-solving abilities of question-answering (QA) agents: humans and AI systems. Through analysis of over 300,000 responses from ~70 AI systems and 155 humans across thousands of quiz questions, CAIMIRA uncovers distinct proficiency patterns in knowledge domains and reasoning skills. Humans outperform AI systems in knowledge-grounded abductive and conceptual reasoning, while state-of-the-art LLMs like GPT-4 and LLaMA show superior performance on targeted information retrieval and fact-based reasoning, particularly when information gaps are well-defined and addressable through pattern matching or data retrieval. These findings highlight the need for future QA tasks to focus on questions that challenge not only higher-order reasoning and scientific thinking, but also demand nuanced linguistic interpretation and cross-contextual knowledge application, helping advance AI developments that better emulate or complement human cognitive abilities in real-world problem-solving.<|reference_end|>
arxiv
@article{gor2024do, title={Do great minds think alike? Investigating Human-AI Complementarity in Question Answering with CAIMIRA}, author={Maharshi Gor, Hal Daumé III, Tianyi Zhou, Jordan Boyd-Graber}, journal={arXiv preprint arXiv:2410.06524}, year={2024}, archivePrefix={arXiv}, eprint={2410.06524}, primaryClass={cs.CL cs.AI cs.LG} }
gor2024do
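CAIMIRA is rooted in item response theory; the textbook two-parameter-logistic (2PL) model below is the basic building block such frameworks generalize. CAIMIRA's actual parameterization is multidimensional and learned from response data, so this is only the underlying idea, not the paper's model.

```python
import math

def irt_2pl_prob(ability: float, difficulty: float,
                 discrimination: float) -> float:
    """Two-parameter-logistic IRT: probability that an agent with the given
    ability answers an item correctly. Higher discrimination makes the item
    separate strong from weak agents more sharply."""
    return 1.0 / (1.0 + math.exp(-discrimination * (ability - difficulty)))

# a strong agent on an easy question vs. a hard one
print(irt_2pl_prob(ability=1.5, difficulty=-0.5, discrimination=1.2))  # ~0.92
print(irt_2pl_prob(ability=1.5, difficulty=2.0,  discrimination=1.2))  # ~0.35
```

Fitting abilities jointly for humans and AI systems over the same question pool is what lets the framework compare their proficiency profiles on a common scale.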
arxiv-667349
2410.06525
CholeskyQR for sparse matrices
<|reference_start|>CholeskyQR for sparse matrices: CholeskyQR is an efficient algorithm for QR factorization with several advantages compared with other algorithms. In order to improve its orthogonality, CholeskyQR2 is developed \cite{2014}\cite{error}. To deal with ill-conditioned matrices, a shifted item $s$ is introduced and we have Shifted CholeskyQR3 \cite{Shifted}. In many problems in the industry, QR factorization for sparse matrices is very common, especially for some sparse matrices with special structures. In this work, we discuss the properties of CholeskyQR-type algorithms for sparse matrices. We introduce new definitions for the input sparse matrix $X$ and divide them into two types based on column properties. We provide better sufficient conditions for $\kappa_{2}(X)$ and a better shifted item $s$ for CholeskyQR-type algorithms under certain element-norm conditions (ENCs) compared with the original ones in \cite{Shifted}\cite{error}, together with an alternative error analysis for the algorithm. The steps of analysis utilize the properties of the $g$-norm of the matrix which is given in the previous work. Moreover, a new three-step CholeskyQR-type algorithm with two shifted items called 3C is developed for sparse matrices with good orthogonality. We conduct numerical experiments with some typical real examples to show the advantages of the improved algorithms compared with the original ones in the previous works.<|reference_end|>
arxiv
@article{guan2024choleskyqr, title={CholeskyQR for sparse matrices}, author={Haoran Guan, Yuwei Fan}, journal={arXiv preprint arXiv:2410.06525}, year={2024}, archivePrefix={arXiv}, eprint={2410.06525}, primaryClass={math.NA cs.NA} }
guan2024choleskyqr
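For orientation, a minimal NumPy sketch of the CholeskyQR family discussed above: one shifted step to tame ill-conditioning, followed by two plain steps for orthogonality (Shifted CholeskyQR3). The shift constant follows the usual form $c\,u\,\|X\|_2^2$ from the shifted-CholeskyQR literature and is illustrative rather than the refined, sparsity-aware bound this paper derives.

```python
import numpy as np
from scipy.linalg import solve_triangular

def cholesky_qr(X: np.ndarray, shift: float = 0.0):
    """One CholeskyQR step: factor X = QR via the Gram matrix X^T X.
    A positive `shift` (Shifted CholeskyQR) keeps the Cholesky
    factorization from breaking down for ill-conditioned X."""
    n = X.shape[1]
    G = X.T @ X + shift * np.eye(n)
    R = np.linalg.cholesky(G).T                      # upper-triangular, G = R^T R
    Q = solve_triangular(R.T, X.T, lower=True).T     # Q = X R^{-1}
    return Q, R

def shifted_cholesky_qr3(X: np.ndarray):
    """Shifted CholeskyQR3: a shifted step, then CholeskyQR2 (two plain
    steps). The shift below is a standard choice of the form c*u*||X||_2^2;
    the constant is illustrative, not this paper's improved bound."""
    m, n = X.shape
    u = np.finfo(X.dtype).eps
    s = 11.0 * u * (m * n + n * (n + 1)) * np.linalg.norm(X, 2) ** 2
    Q, R1 = cholesky_qr(X, shift=s)
    Q, R2 = cholesky_qr(Q)
    Q, R3 = cholesky_qr(Q)
    return Q, R3 @ R2 @ R1
```

For sparse X the Gram matrix X^T X is where the column structure studied in the paper enters: it governs both the conditioning bound and how small the shift can safely be.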
arxiv-667350
2410.06526
KOR-Bench: Benchmarking Language Models on Knowledge-Orthogonal Reasoning Tasks
<|reference_start|>KOR-Bench: Benchmarking Language Models on Knowledge-Orthogonal Reasoning Tasks: In this paper, we introduce Knowledge-Orthogonal Reasoning (KOR), which minimizes the impact of domain-specific knowledge for a more accurate evaluation of models' reasoning abilities in out-of-distribution scenarios. Based on this concept, we propose the Knowledge-Orthogonal Reasoning Benchmark (KOR-Bench), encompassing five task categories: Operation, Logic, Cipher, Puzzle, and Counterfactual. KOR-Bench emphasizes the effectiveness of models in applying new rule descriptions to solve novel rule-driven questions, revealing that top-performing models like Claude-3.5-Sonnet and GPT-4o only achieve 58.96% and 58.00% accuracy, respectively. We conduct thorough analyses to identify bottlenecks in the Cipher task using Stepwise Prompting, discovering that two rounds of Self-Correction yield optimal results. Complex Task Processing evaluates model performance across three integrated tasks, while we also explore the impact of Tricks on the Puzzle task and visualize rule-focused attention to enhance our understanding of model behavior. We aim for KOR-Bench to be a valuable resource for enhancing models' reasoning capabilities and fostering further research in this field.<|reference_end|>
arxiv
@article{ma2024kor-bench:, title={KOR-Bench: Benchmarking Language Models on Knowledge-Orthogonal Reasoning Tasks}, author={Kaijing Ma, Xinrun Du, Yunran Wang, Haoran Zhang, Zhoufutu Wen, Xingwei Qu, Jian Yang, Jiaheng Liu, Minghao Liu, Xiang Yue, Wenhao Huang, Ge Zhang}, journal={arXiv preprint arXiv:2410.06526}, year={2024}, archivePrefix={arXiv}, eprint={2410.06526}, primaryClass={cs.DB} }
ma2024kor-bench:
arxiv-667351
2410.06527
The Sampling-Gaussian for stereo matching
<|reference_start|>The Sampling-Gaussian for stereo matching: The soft-argmax operation is widely adopted in neural network-based stereo matching methods to enable differentiable regression of disparity. However, networks trained with soft-argmax are prone to multimodal predictions due to the absence of an explicit constraint on the shape of the probability distribution. Previous methods leverage the Laplacian distribution and cross-entropy for training but fail to effectively improve the accuracy and even compromise the efficiency of the network. In this paper, we conduct a detailed analysis of the previous distribution-based methods and propose a novel supervision method for stereo matching, Sampling-Gaussian. We sample from the Gaussian distribution for supervision. Moreover, we interpret the training as minimizing the distance in vector space and propose a combined loss of L1 loss and cosine similarity loss. Additionally, we leverage bilinear interpolation to upsample the cost volume. Our method can be directly applied to any soft-argmax-based stereo matching method without a reduction in efficiency. We have conducted comprehensive experiments to demonstrate the superior performance of our Sampling-Gaussian. The experimental results prove that we have achieved better accuracy on five baseline methods and two datasets. Our method is easy to implement, and the code is available online.<|reference_end|>
arxiv
@article{pan2024the, title={The Sampling-Gaussian for stereo matching}, author={Baiyu Pan and Jichao Jiao and Bowen Yao and Jianxin Pang and Jun Cheng}, journal={arXiv preprint arXiv:2410.06527}, year={2024}, archivePrefix={arXiv}, eprint={2410.06527}, primaryClass={cs.CV cs.AI} }
pan2024the
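A hedged sketch of the supervision the Sampling-Gaussian abstract describes: build a target distribution by sampling a Gaussian centred at the ground-truth disparity over the disparity bins, then combine an L1 term with a cosine-similarity term. The variance `sigma` and weight `lam` are assumptions, not the paper's settings.

```python
import torch
import torch.nn.functional as F

def gaussian_target(gt_disp: torch.Tensor, num_disp: int,
                    sigma: float = 1.0) -> torch.Tensor:
    """Sample a Gaussian pdf centred on the ground-truth disparity at the
    integer disparity bins and normalize it into a target distribution.
    gt_disp: (B, H, W) float ground truth; returns (B, D, H, W)."""
    d = torch.arange(num_disp, device=gt_disp.device, dtype=gt_disp.dtype)
    d = d.view(1, -1, 1, 1)                                  # (1, D, 1, 1)
    logits = -(d - gt_disp.unsqueeze(1)) ** 2 / (2 * sigma ** 2)
    return torch.softmax(logits, dim=1)                      # unimodal target

def sampling_gaussian_loss(pred_prob: torch.Tensor, gt_disp: torch.Tensor,
                           lam: float = 1.0, sigma: float = 1.0) -> torch.Tensor:
    """Combined L1 + cosine-similarity loss between the predicted disparity
    distribution (B, D, H, W) and the Gaussian-sampled target. The balance
    weight `lam` is illustrative; the paper's exact weighting may differ."""
    target = gaussian_target(gt_disp, pred_prob.shape[1], sigma)
    l1 = (pred_prob - target).abs().sum(dim=1).mean()
    cos = F.cosine_similarity(pred_prob, target, dim=1).mean()
    return l1 + lam * (1.0 - cos)
```

Because the target is explicitly unimodal, this supervision directly penalizes the multimodal distributions that plain soft-argmax training tends to produce.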
arxiv-667352
2410.06528
Deciding subspace reachability problems with application to Skolem's Problem
<|reference_start|>Deciding subspace reachability problems with application to Skolem's Problem: The higher-dimensional version of Kannan and Lipton's Orbit Problem asks whether it is decidable if a target vector space can be reached from a starting point under repeated application of a linear transformation. This problem has remained open since its formulation, and in fact generalizes Skolem's Problem -- a long-standing open problem concerning the existence of zeros in linear recurrence sequences. Both problems have traditionally been studied using algebraic and number theoretic machinery. In contrast, this paper reduces the Orbit Problem to an equivalent version in real projective space, introducing a basic geometric reference for examining and deciding problem instances. We find this geometric toolkit enables basic proofs of sweeping assertions concerning the decidability of certain problem classes, including results where the only other known proofs rely on sophisticated number-theoretic arguments.<|reference_end|>
arxiv
@article{everett2024deciding, title={Deciding subspace reachability problems with application to Skolem's Problem}, author={Samuel Everett}, journal={arXiv preprint arXiv:2410.06528}, year={2024}, archivePrefix={arXiv}, eprint={2410.06528}, primaryClass={cs.LO} }
everett2024deciding
arxiv-667353
2410.06529
Quasi-stationary Subdivision Schemes in Arbitrary Dimensions
<|reference_start|>Quasi-stationary Subdivision Schemes in Arbitrary Dimensions: Stationary subdivision schemes have been extensively studied and have numerous applications in CAGD and wavelet analysis. To have high-order smoothness of the scheme, it is usually inevitable to enlarge the support of the mask that is used, which is a major difficulty with stationary subdivision schemes due to complicated implementation and a dramatically increased number of special subdivision rules at extraordinary vertices. In this paper, we introduce the notion of a multivariate quasi-stationary subdivision scheme and fully characterize its convergence and smoothness. We will also discuss the general procedure of designing interpolatory masks with short support that yields smooth quasi-stationary subdivision schemes. Specifically, using the dyadic dilation of both triangular and quadrilateral meshes, for each smoothness exponent $m=1,2$, we obtain examples of $C^m$-convergent quasi-stationary $2I_2$-subdivision schemes with bivariate symmetric masks having at most $m$-ring stencils. Our examples demonstrate the advantage of quasi-stationary subdivision schemes, which can circumvent the difficulty above with stationary subdivision schemes.<|reference_end|>
arxiv
@article{lu2024quasi-stationary, title={Quasi-stationary Subdivision Schemes in Arbitrary Dimensions}, author={Ran Lu, Bin Han}, journal={arXiv preprint arXiv:2410.06529}, year={2024}, archivePrefix={arXiv}, eprint={2410.06529}, primaryClass={math.NA cs.NA} }
lu2024quasi-stationary
arxiv-667354
2410.06530
TopoTune : A Framework for Generalized Combinatorial Complex Neural Networks
<|reference_start|>TopoTune : A Framework for Generalized Combinatorial Complex Neural Networks: Graph Neural Networks (GNNs) excel in learning from relational datasets, processing node and edge features in a way that preserves the symmetries of the graph domain. However, many complex systems--such as biological or social networks--involve multiway complex interactions that are more naturally represented by higher-order topological spaces. The emerging field of Topological Deep Learning (TDL) aims to accommodate and leverage these higher-order structures. Combinatorial Complex Neural Networks (CCNNs), fairly general TDL models, have been shown to be more expressive and better performing than GNNs. However, unlike the graph deep learning ecosystem, TDL lacks a principled and standardized framework for easily defining new architectures, restricting its accessibility and applicability. To address this issue, we introduce Generalized CCNNs (GCCNs), a novel simple yet powerful family of TDL models that can be used to systematically transform any (graph) neural network into its TDL counterpart. We prove that GCCNs generalize and subsume CCNNs, while extensive experiments on a diverse class of GCCNs show that these architectures consistently match or outperform CCNNs, often with less model complexity. In an effort to accelerate and democratize TDL, we introduce TopoTune, a lightweight software that allows practitioners to define, build, and train GCCNs with unprecedented flexibility and ease.<|reference_end|>
arxiv
@article{papillon2024topotune, title={TopoTune : A Framework for Generalized Combinatorial Complex Neural Networks}, author={Mathilde Papillon, Guillermo Bernárdez, Claudio Battiloro, Nina Miolane}, journal={arXiv preprint arXiv:2410.06530}, year={2024}, archivePrefix={arXiv}, eprint={2410.06530}, primaryClass={cs.LG cs.AI} }
papillon2024topotune
arxiv-667355
2410.06533
OpenEarable ExG: Open-Source Hardware for Ear-Based Biopotential Sensing Applications
<|reference_start|>OpenEarable ExG: Open-Source Hardware for Ear-Based Biopotential Sensing Applications: While traditional earphones primarily offer private audio spaces, so-called "earables" emerged to offer a variety of sensing capabilities. Pioneering platforms like OpenEarable have introduced novel sensing platforms targeted at the ears, incorporating various sensors. The proximity of the ears to the eyes, brain, and facial muscles has also sparked investigation into sensing biopotentials. However, currently there is no platform available that is targeted at the ears to sense biopotentials. To address this gap, we introduce OpenEarable ExG - an open-source hardware platform designed to measure biopotentials in and around the ears. OpenEarable ExG can be freely configured and has up to 7 sensing channels. We initially validate OpenEarable ExG in a study with a left-right in-ear dual-electrode montage setup with 3 participants. Our results demonstrate the successful detection of smooth pursuit eye movements via Electrooculography (EOG), alpha brain activity via Electroencephalography (EEG), and jaw clenching via Electromyography (EMG). OpenEarable ExG is part of the OpenEarable initiative and is fully open-source under MIT license.<|reference_end|>
arxiv
@article{lepold2024openearable, title={OpenEarable ExG: Open-Source Hardware for Ear-Based Biopotential Sensing Applications}, author={Philipp Lepold, Tobias Röddiger, Tobias King, Kai Kunze, Christoph Maurer, Michael Beigl}, journal={UbiComp '24: Companion of the 2024 on ACM International Joint Conference on Pervasive and Ubiquitous Computing, Pages 916 - 92}, year={2024}, doi={10.1145/3675094.3678480}, archivePrefix={arXiv}, eprint={2410.06533}, primaryClass={cs.AR} }
lepold2024openearable
arxiv-667356
2410.06534
EEG-estimated functional connectivity, and not behavior, differentiates Parkinson's patients from health controls during the Simon conflict task
<|reference_start|>EEG-estimated functional connectivity, and not behavior, differentiates Parkinson's patients from health controls during the Simon conflict task: Neural biomarkers that can classify or predict disease are of broad interest to the neurological and psychiatric communities. Such biomarkers can be informative of disease state or treatment efficacy, even before there are changes in symptoms and/or behavior. This work investigates EEG-estimated functional connectivity (FC) as a Parkinson's Disease (PD) biomarker. Specifically, we investigate FC mediated via neural oscillations and consider such activity during the Simon conflict task. This task yields sensory-motor conflict, and one might expect differences in behavior between PD patients and healthy controls (HCs). In addition to considering spatially focused approaches, such as FC, as a biomarker, we also consider temporal biomarkers, which are more sensitive to ongoing changes in neural activity. We find that FC, estimated from delta (1-4Hz) and theta (4-7Hz) oscillations, yields spatial FC patterns significantly better at distinguishing PD from HC than temporal features or behavior. This study reinforces that FC in spectral bands is informative of differences in brain-wide processes and can serve as a biomarker distinguishing normal brain function from that seen in disease.<|reference_end|>
arxiv
@article{sun2024eeg-estimated, title={EEG-estimated functional connectivity, and not behavior, differentiates Parkinson's patients from health controls during the Simon conflict task}, author={Xiaoxiao Sun, Chongkun Zhao, Sharath Koorathota, Paul Sajda}, journal={arXiv preprint arXiv:2410.06534}, year={2024}, archivePrefix={arXiv}, eprint={2410.06534}, primaryClass={q-bio.NC cs.LG} }
sun2024eeg-estimated
arxiv-667357
2410.06535
Happy: A Debiased Learning Framework for Continual Generalized Category Discovery
<|reference_start|>Happy: A Debiased Learning Framework for Continual Generalized Category Discovery: Constantly discovering novel concepts is crucial in evolving environments. This paper explores the underexplored task of Continual Generalized Category Discovery (C-GCD), which aims to incrementally discover new classes from unlabeled data while maintaining the ability to recognize previously learned classes. Although several settings are proposed to study the C-GCD task, they have limitations that do not reflect real-world scenarios. We thus study a more practical C-GCD setting, which includes more new classes to be discovered over a longer period, without storing samples of past classes. In C-GCD, the model is initially trained on labeled data of known classes, followed by multiple incremental stages where the model is fed with unlabeled data containing both old and new classes. The core challenge involves two conflicting objectives: discover new classes and prevent forgetting old ones. We delve into the conflicts and identify that models are susceptible to prediction bias and hardness bias. To address these issues, we introduce a debiased learning framework, namely Happy, characterized by Hardness-aware prototype sampling and soft entropy regularization. For the prediction bias, we first introduce clustering-guided initialization to provide robust features. In addition, we propose soft entropy regularization to assign appropriate probabilities to new classes, which can significantly enhance the clustering performance of new classes. For the hardness bias, we present the hardness-aware prototype sampling, which can effectively reduce the forgetting issue for previously seen classes, especially for difficult classes. Experimental results demonstrate our method proficiently manages the conflicts of C-GCD and achieves remarkable performance across various datasets, e.g., 7.5% overall gains on ImageNet-100. Our code is publicly available at https://github.com/mashijie1028/Happy-CGCD.<|reference_end|>
arxiv
@article{ma2024happy:, title={Happy: A Debiased Learning Framework for Continual Generalized Category Discovery}, author={Shijie Ma, Fei Zhu, Zhun Zhong, Wenzhuo Liu, Xu-Yao Zhang, Cheng-Lin Liu}, journal={arXiv preprint arXiv:2410.06535}, year={2024}, archivePrefix={arXiv}, eprint={2410.06535}, primaryClass={cs.CV} }
ma2024happy:
arxiv-667358
2410.06536
Learning Recommender Systems with Soft Target: A Decoupled Perspective
<|reference_start|>Learning Recommender Systems with Soft Target: A Decoupled Perspective: Learning recommender systems with multi-class optimization objective is a prevalent setting in recommendation. However, as observed user feedback often accounts for a tiny fraction of the entire item pool, the standard Softmax loss tends to ignore the difference between potential positive feedback and truly negative feedback. To address this challenge, we propose a novel decoupled soft label optimization framework to consider the objectives as two aspects by leveraging soft labels, including target confidence and the latent interest distribution of non-target items. Furthermore, based on our careful theoretical analysis, we design a decoupled loss function to flexibly adjust the importance of these two aspects. To maximize the performance of the proposed method, we additionally present a sensible soft-label generation algorithm that employs a label propagation algorithm to explore users' latent interests in unobserved feedback via neighbors. We conduct extensive experiments on various recommendation system models and public datasets; the results demonstrate the effectiveness and generality of the proposed method.<|reference_end|>
arxiv
@article{zhang2024learning, title={Learning Recommender Systems with Soft Target: A Decoupled Perspective}, author={Hao Zhang, Mingyue Cheng, Qi Liu, Yucong Luo, Rui Li, Enhong Chen}, journal={arXiv preprint arXiv:2410.06536}, year={2024}, archivePrefix={arXiv}, eprint={2410.06536}, primaryClass={cs.IR} }
zhang2024learning
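One way to read the decoupled objective above is as a weighted sum of a target-confidence term and a latent-interest term over non-target items. The sketch below is an interpretation under stated assumptions (the `alpha` weight and the precomputed `soft_labels` tensor are illustrative), not the paper's exact loss.

```python
import torch
import torch.nn.functional as F

def decoupled_soft_label_loss(logits: torch.Tensor, target: torch.Tensor,
                              soft_labels: torch.Tensor,
                              alpha: float = 0.5) -> torch.Tensor:
    """Decoupled soft-label objective sketch.

    logits:      (B, num_items) model scores.
    target:      (B,) index of the observed positive item.
    soft_labels: (B, num_items) propagated interest distribution over
                 non-target items (zero at the target); how it is produced
                 (label propagation over neighbors) is outside this sketch.
    alpha:       weight between the two aspects -- an illustrative knob,
                 not the paper's schedule.
    """
    log_p = F.log_softmax(logits, dim=-1)
    # (1) target confidence: ordinary cross-entropy on the observed item
    ce = F.nll_loss(log_p, target)
    # (2) latent interest: match the soft distribution over non-target items
    latent = -(soft_labels * log_p).sum(dim=-1).mean()
    return alpha * ce + (1 - alpha) * latent
```

Splitting the loss this way is what lets potential positives (high soft-label mass) be treated differently from truly negative items, which the plain Softmax loss lumps together.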
arxiv-667359
2410.06541
Chip-Tuning: Classify Before Language Models Say
<|reference_start|>Chip-Tuning: Classify Before Language Models Say: The rapid development in the performance of large language models (LLMs) is accompanied by the escalation of model size, leading to the increasing cost of model training and inference. Previous research has discovered that certain layers in LLMs exhibit redundancy, and removing these layers brings only marginal loss in model performance. In this paper, we adopt the probing technique to explain the layer redundancy in LLMs and demonstrate that language models can be effectively pruned with probing classifiers. We propose chip-tuning, a simple and effective structured pruning framework specialized for classification problems. Chip-tuning attaches tiny probing classifiers named chips to different layers of LLMs, and trains chips with the backbone model frozen. After selecting a chip for classification, all layers subsequent to the attached layer could be removed with marginal performance loss. Experimental results on various LLMs and datasets demonstrate that chip-tuning significantly outperforms previous state-of-the-art baselines in both accuracy and pruning ratio, achieving a pruning ratio of up to 50%. We also find that chip-tuning could be applied on multimodal models, and could be combined with model finetuning, proving its excellent compatibility.<|reference_end|>
arxiv
@article{zhu2024chip-tuning:, title={Chip-Tuning: Classify Before Language Models Say}, author={Fangwei Zhu, Dian Li, Jiajun Huang, Gang Liu, Hui Wang, Zhifang Sui}, journal={arXiv preprint arXiv:2410.06541}, year={2024}, archivePrefix={arXiv}, eprint={2410.06541}, primaryClass={cs.CL cs.AI} }
zhu2024chip-tuning:
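The chip-tuning mechanism is easy to sketch: a tiny linear "chip" reads one layer's hidden states while the backbone stays frozen. The sketch assumes a HuggingFace-style model exposing `output_hidden_states`; the layer choice, last-token pooling, and optimizer setup are illustrative, not the paper's exact configuration.

```python
import torch
import torch.nn as nn

class Chip(nn.Module):
    """A tiny probing classifier attached to one layer's hidden states."""
    def __init__(self, d_model: int, num_classes: int):
        super().__init__()
        self.proj = nn.Linear(d_model, num_classes)

    def forward(self, hidden: torch.Tensor) -> torch.Tensor:
        # hidden: (B, T, d_model); classify from the last token's state
        return self.proj(hidden[:, -1, :])

def train_step(backbone, chip, optimizer, input_ids, labels, layer: int) -> float:
    """One training step with the backbone frozen: only the chip's
    parameters receive gradients."""
    with torch.no_grad():                      # backbone stays frozen
        out = backbone(input_ids, output_hidden_states=True)
        hidden = out.hidden_states[layer]
    loss = nn.functional.cross_entropy(chip(hidden), labels)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()
```

Once a chip attached at layer k is selected for the task, every layer above k can be discarded at inference, which is where the reported pruning ratio of up to 50% comes from.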
arxiv-667360
2410.06542
MedImageInsight: An Open-Source Embedding Model for General Domain Medical Imaging
<|reference_start|>MedImageInsight: An Open-Source Embedding Model for General Domain Medical Imaging: In this work, we present MedImageInsight, an open-source medical imaging embedding model. MedImageInsight is trained on medical images with associated text and labels across a diverse collection of domains, including X-Ray, CT, MRI, dermoscopy, OCT, fundus photography, ultrasound, histopathology, and mammography. Rigorous evaluations demonstrate MedImageInsight's ability to achieve state-of-the-art (SOTA) or human expert level performance across classification, image-image search, and fine-tuning tasks. Specifically, on public datasets, MedImageInsight achieves SOTA in CT 3D medical image retrieval, as well as SOTA in disease classification and search for chest X-ray, dermatology, and OCT imaging. Furthermore, MedImageInsight achieves human expert performance in bone age estimation (on both public and partner data), as well as AUC above 0.9 in most other domains. When paired with a text decoder, MedImageInsight achieves near SOTA level single image report findings generation with less than 10\% the parameters of other models. Compared to fine-tuning GPT-4o with only MIMIC-CXR data for the same task, MedImageInsight outperforms in clinical metrics, but underperforms on lexical metrics where GPT-4o sets a new SOTA. Importantly for regulatory purposes, MedImageInsight can generate ROC curves, adjust sensitivity and specificity based on clinical need, and provide evidence-based decision support through image-image search (which can also enable retrieval augmented generation). In an independent clinical evaluation of image-image search in chest X-ray, MedImageInsight outperformed every other publicly available foundation model evaluated by large margins (over 6 points AUC), and significantly outperformed other models in terms of AI fairness (across age and gender). We hope releasing MedImageInsight will help enhance collective progress in medical imaging AI research and development.<|reference_end|>
arxiv
@article{codella2024medimageinsight:, title={MedImageInsight: An Open-Source Embedding Model for General Domain Medical Imaging}, author={Noel C. F. Codella, Ying Jin, Shrey Jain, Yu Gu, Ho Hin Lee, Asma Ben Abacha, Alberto Santamaria-Pang, Will Guyman, Naiteek Sangani, Sheng Zhang, Hoifung Poon, Stephanie Hyland, Shruthi Bannur, Javier Alvarez-Valle, Xue Li, John Garrett, Alan McMillan, Gaurav Rajguru, Madhu Maddi, Nilesh Vijayrania, Rehaan Bhimai, Nick Mecklenburg, Rupal Jain, Daniel Holstein, Naveen Gaur, Vijay Aski, Jenq-Neng Hwang, Thomas Lin, Ivan Tarapov, Matthew Lungren, Mu Wei}, journal={arXiv preprint arXiv:2410.06542}, year={2024}, archivePrefix={arXiv}, eprint={2410.06542}, primaryClass={eess.IV cs.CV} }
codella2024medimageinsight:
arxiv-667361
2410.06543
Gumbel Rao Monte Carlo based Bi-Modal Neural Architecture Search for Audio-Visual Deepfake Detection
<|reference_start|>Gumbel Rao Monte Carlo based Bi-Modal Neural Architecture Search for Audio-Visual Deepfake Detection: Deepfakes pose a critical threat to biometric authentication systems by generating highly realistic synthetic media. Existing multimodal deepfake detectors often struggle to adapt to diverse data and rely on simple fusion methods. To address these challenges, we propose Gumbel-Rao Monte Carlo Bi-modal Neural Architecture Search (GRMC-BMNAS), a novel architecture search framework that employs Gumbel-Rao Monte Carlo sampling to optimize multimodal fusion. It refines the Straight through Gumbel Softmax (STGS) method by reducing variance with Rao-Blackwellization, stabilizing network training. Using a two-level search approach, the framework optimizes the network architecture, parameters, and performance. Crucial features are efficiently identified from backbone networks, while within the cell structure, a weighted fusion operation integrates information from various sources. Varying parameters such as the temperature and the number of Monte Carlo samples yields an architecture that maximizes classification performance and generalization capability. Experimental results on the FakeAVCeleb and SWAN-DF datasets demonstrate an impressive AUC percentage of 95.4\%, achieved with minimal model parameters.<|reference_end|>
arxiv
@article{pn2024gumbel, title={Gumbel Rao Monte Carlo based Bi-Modal Neural Architecture Search for Audio-Visual Deepfake Detection}, author={Aravinda Reddy PN, Raghavendra Ramachandra, Krothapalli Sreenivasa Rao, Pabitra Mitra, Vinod Rathod}, journal={arXiv preprint arXiv:2410.06543}, year={2024}, archivePrefix={arXiv}, eprint={2410.06543}, primaryClass={cs.CR cs.SD eess.AS} }
pn2024gumbel
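A minimal PyTorch sketch of the variance-reduction idea named in the GRMC-BMNAS entry above: a straight-through categorical sample whose backward pass averages several Gumbel-softmax relaxations. The true Gumbel-Rao estimator draws these relaxations conditioned on the hard sample; the independent draws, function names, and hyperparameters below are simplifying assumptions.

```python
import torch
import torch.nn.functional as F

def gumbel_noise(shape):
    # Standard Gumbel(0, 1) noise via inverse transform sampling.
    u = torch.rand(shape).clamp_(1e-9, 1 - 1e-9)
    return -torch.log(-torch.log(u))

def grmc_sample(logits, tau=1.0, k=10):
    """Straight-through sample whose gradient path is the mean of k
    Gumbel-softmax relaxations, lowering variance versus a single draw."""
    hard_idx = (logits + gumbel_noise(logits.shape)).argmax(dim=-1)
    hard = F.one_hot(hard_idx, logits.shape[-1]).to(logits.dtype)
    soft = torch.stack([
        F.softmax((logits + gumbel_noise(logits.shape)) / tau, dim=-1)
        for _ in range(k)
    ]).mean(dim=0)
    # Forward value is the hard one-hot; gradients flow through `soft`.
    return hard + soft - soft.detach()

logits = torch.randn(4, 8, requires_grad=True)
grmc_sample(logits, tau=0.5, k=16).sum().backward()  # logits.grad is populated
```

Raising k trades compute for lower gradient variance, which is the knob the abstract varies alongside the temperature.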
arxiv-667362
2410.06544
SRC-gAudio: Sampling-Rate-Controlled Audio Generation
<|reference_start|>SRC-gAudio: Sampling-Rate-Controlled Audio Generation: We introduce SRC-gAudio, a novel audio generation model designed to facilitate text-to-audio generation across a wide range of sampling rates within a single model architecture. SRC-gAudio incorporates the sampling rate as part of the generation condition to guide the diffusion-based audio generation process. Our model enables the generation of audio at multiple sampling rates with a single unified model. Furthermore, we explore the potential benefits of large-scale, low-sampling-rate data in enhancing the generation quality of high-sampling-rate audio. Through extensive experiments, we demonstrate that SRC-gAudio effectively generates audio under controlled sampling rates. Additionally, our results indicate that pre-training on low-sampling-rate data can lead to significant improvements in audio quality across various metrics.<|reference_end|>
arxiv
@article{li2024src-gaudio:, title={SRC-gAudio: Sampling-Rate-Controlled Audio Generation}, author={Chenxing Li, Manjie Xu, Dong Yu}, journal={arXiv preprint arXiv:2410.06544}, year={2024}, archivePrefix={arXiv}, eprint={2410.06544}, primaryClass={cs.SD eess.AS} }
li2024src-gaudio:
arxiv-667363
2410.06545
Signal Watermark on Large Language Models
<|reference_start|>Signal Watermark on Large Language Models: As Large Language Models (LLMs) become increasingly sophisticated, they raise significant security concerns, including the creation of fake news and academic misuse. Most detectors for identifying model-generated text are limited by their reliance on variance in perplexity and burstiness, and they require substantial computational resources. In this paper, we propose a watermarking method that embeds a specific watermark into text during its generation by LLMs, based on a pre-defined signal pattern. This technique not only ensures the watermark's invisibility to humans but also maintains the quality and grammatical integrity of model-generated text. We utilize LLMs and the Fast Fourier Transform (FFT) for token probability computation and detection of the signal watermark. The unique application of signal processing principles to text generation by LLMs allows for subtle yet effective embedding of watermarks, which does not compromise the quality or coherence of the generated text. Our method has been empirically validated across multiple LLMs, consistently maintaining high detection accuracy, even with variations in temperature settings during text generation. In the experiment of distinguishing between human-written and watermarked text, our method achieved an AUROC score of 0.97, significantly outperforming existing methods like GPTZero, which scored 0.64. The watermark's resilience to various attack scenarios further confirms its robustness, addressing significant challenges in model-generated text authentication.<|reference_end|>
arxiv
@article{xu2024signal, title={Signal Watermark on Large Language Models}, author={Zhenyu Xu and Victor S. Sheng}, journal={arXiv preprint arXiv:2410.06545}, year={2024}, archivePrefix={arXiv}, eprint={2410.06545}, primaryClass={cs.CR cs.LG} }
xu2024signal
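A toy sketch of the periodic-watermark idea in the entry above: nudge generation toward a keyed half of the vocabulary with a sinusoidal amplitude over token positions, then detect the resulting periodicity with an FFT. The frequency, bias strength, keying scheme, and detection score below are illustrative assumptions, not the paper's exact construction.

```python
import numpy as np

FREQ = 0.125      # assumed watermark frequency (cycles per token)
STRENGTH = 2.0    # assumed logit-bias amplitude

def keyed_half(vocab_size, seed=0):
    # Secret-keyed half of the vocabulary that receives the bias.
    return np.random.default_rng(seed).permutation(vocab_size)[: vocab_size // 2]

def bias_logits(logits, position, half):
    # Sinusoidal push toward the keyed half, varying with token position.
    amp = STRENGTH * np.sin(2 * np.pi * FREQ * position)
    out = logits.copy()
    out[half] += amp
    return out

def detection_score(in_half):
    # FFT of the keyed-half indicator sequence; a spike at FREQ flags the mark.
    x = np.asarray(in_half, dtype=float)
    x -= x.mean()
    spectrum = np.abs(np.fft.rfft(x))
    freqs = np.fft.rfftfreq(len(x))
    peak = spectrum[np.argmin(np.abs(freqs - FREQ))]
    return peak / (spectrum.mean() + 1e-9)   # large ratio => watermarked
```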
arxiv-667364
2410.06547
TuringQ: Benchmarking AI Comprehension in Theory of Computation
<|reference_start|>TuringQ: Benchmarking AI Comprehension in Theory of Computation: We present TuringQ, the first benchmark designed to evaluate the reasoning capabilities of large language models (LLMs) in the theory of computation. TuringQ consists of 4,006 undergraduate and graduate-level question-answer pairs, categorized into four difficulty levels and covering seven core theoretical areas. We evaluate several open-source LLMs, as well as GPT-4, using Chain of Thought prompting and expert human assessment. Additionally, we propose an automated LLM-based evaluation system that demonstrates competitive accuracy when compared to human evaluation. Fine-tuning a Llama3-8B model on TuringQ shows measurable improvements in reasoning ability and out-of-domain tasks such as algebra. TuringQ serves as both a benchmark and a resource for enhancing LLM performance in complex computational reasoning tasks. Our analysis offers insights into LLM capabilities and advances in AI comprehension of theoretical computer science.<|reference_end|>
arxiv
@article{zahraei2024turingq:, title={TuringQ: Benchmarking AI Comprehension in Theory of Computation}, author={Pardis Sadat Zahraei and Ehsaneddin Asgari}, journal={arXiv preprint arXiv:2410.06547}, year={2024}, archivePrefix={arXiv}, eprint={2410.06547}, primaryClass={cs.CL cs.FL} }
zahraei2024turingq:
arxiv-667365
2410.06549
DiffGAD: A Diffusion-based Unsupervised Graph Anomaly Detector
<|reference_start|>DiffGAD: A Diffusion-based Unsupervised Graph Anomaly Detector: Graph Anomaly Detection (GAD) is crucial for identifying abnormal entities within networks, garnering significant attention across various fields. Traditional unsupervised methods, which decode encoded latent representations of unlabeled data with a reconstruction focus, often fail to capture critical discriminative content, leading to suboptimal anomaly detection. To address these challenges, we present a Diffusion-based Graph Anomaly Detector (DiffGAD). At the heart of DiffGAD is a novel latent space learning paradigm, meticulously designed to enhance its proficiency by guiding it with discriminative content. This innovative approach leverages diffusion sampling to infuse the latent space with discriminative content and introduces a content-preservation mechanism that retains valuable information across different scales, significantly improving its adeptness at identifying anomalies with limited time and space complexity. Our comprehensive evaluation of DiffGAD, conducted on six real-world and large-scale datasets with various metrics, demonstrated its exceptional performance.<|reference_end|>
arxiv
@article{li2024diffgad:, title={DiffGAD: A Diffusion-based Unsupervised Graph Anomaly Detector}, author={Jinghan Li, Yuan Gao, Jinda Lu, Junfeng Fang, Congcong Wen, Hui Lin, Xiang Wang}, journal={arXiv preprint arXiv:2410.06549}, year={2024}, archivePrefix={arXiv}, eprint={2410.06549}, primaryClass={cs.LG cs.AI cs.SI} }
li2024diffgad:
arxiv-667366
2410.06550
Investigating Cost-Efficiency of LLM-Generated Training Data for Conversational Semantic Frame Analysis
<|reference_start|>Investigating Cost-Efficiency of LLM-Generated Training Data for Conversational Semantic Frame Analysis: Recent studies have demonstrated that few-shot learning allows LLMs to generate training data for supervised models at a low cost. However, the quality of LLM-generated data may not entirely match that of human-labeled data. This raises a crucial question: how should one balance the trade-off between the higher quality but more expensive human data and the lower quality yet substantially cheaper LLM-generated data? In this paper, we synthesized training data for conversational semantic frame analysis using GPT-4 and examined how to allocate budgets optimally to achieve the best performance. Our experiments, conducted across various budget levels, reveal that optimal cost-efficiency is achieved by combining both human and LLM-generated data across a wide range of budget levels. Notably, as the budget decreases, a higher proportion of LLM-generated data becomes more preferable.<|reference_end|>
arxiv
@article{matta2024investigating, title={Investigating Cost-Efficiency of LLM-Generated Training Data for Conversational Semantic Frame Analysis}, author={Shiho Matta, Yin Jou Huang, Fei Cheng, Hirokazu Kiyomaru and Yugo Murawaki}, journal={arXiv preprint arXiv:2410.06550}, year={2024}, archivePrefix={arXiv}, eprint={2410.06550}, primaryClass={cs.CL cs.AI} }
matta2024investigating
arxiv-667367
2410.06551
InstantIR: Blind Image Restoration with Instant Generative Reference
<|reference_start|>InstantIR: Blind Image Restoration with Instant Generative Reference: Handling unknown degradation at test time is the major challenge in Blind Image Restoration (BIR), necessitating high model generalization. An effective strategy is to incorporate prior knowledge, either from human input or a generative model. In this paper, we introduce Instant-reference Image Restoration (InstantIR), a novel diffusion-based BIR method which dynamically adjusts the generation condition during inference. We first extract a compact representation of the input via a pre-trained vision encoder. At each generation step, this representation is used to decode the current diffusion latent and instantiate it in the generative prior. The degraded image is then encoded with this reference, providing a robust generation condition. We observe that the variance of generative references fluctuates with degradation intensity, which we further leverage as an indicator for developing a sampling algorithm adaptive to input quality. Extensive experiments demonstrate that InstantIR achieves state-of-the-art performance and offers outstanding visual quality. Through modulating generative references with textual descriptions, InstantIR can restore extreme degradation and additionally supports creative restoration.<|reference_end|>
arxiv
@article{huang2024instantir:, title={InstantIR: Blind Image Restoration with Instant Generative Reference}, author={Jen-Yuan Huang, Haofan Wang, Qixun Wang, Xu Bai, Hao Ai, Peng Xing, Jen-Tse Huang}, journal={arXiv preprint arXiv:2410.06551}, year={2024}, archivePrefix={arXiv}, eprint={2410.06551}, primaryClass={cs.CV cs.AI cs.LG} }
huang2024instantir:
arxiv-667368
2410.06552
Ventilator pressure prediction using recurrent neural network
<|reference_start|>Ventilator pressure prediction using recurrent neural network: This paper presents a recurrent neural network approach to simulating mechanical ventilator pressure. A traditional mechanical ventilator applies a control pressure that is monitored by a medical practitioner, and it can behave incorrectly if the proper pressure is not applied. This paper takes advantage of recent research and develops a simulator based on a deep sequence model to predict airway pressure in the respiratory circuit during the inspiratory phase of a breath, given a time series of control parameters and lung attributes. This method demonstrates the effectiveness of neural network-based controllers in tracking pressure waveforms significantly better than the current industry standard and provides insights into the development of effective and robust pressure-controlled mechanical ventilators. Performance is measured as the mean absolute error between the predicted and actual pressures during the inspiratory phase of each breath.<|reference_end|>
arxiv
@article{diao2024ventilator, title={Ventilator pressure prediction using recurrent neural network}, author={Su Diao, Changsong Wei, Junyu Wang, Yizhou Li}, journal={arXiv preprint arXiv:2410.06552}, year={2024}, archivePrefix={arXiv}, eprint={2410.06552}, primaryClass={cs.DC} }
diao2024ventilator
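A minimal PyTorch sketch of the kind of deep sequence model the ventilator entry above describes: a bidirectional LSTM mapping per-timestep control features to airway pressure, scored with the inspiratory-phase MAE the paper reports. The four input features and layer sizes are assumptions (they follow the public Kaggle ventilator-pressure data this line of work builds on).

```python
import torch
import torch.nn as nn

class PressureLSTM(nn.Module):
    def __init__(self, n_features=4, hidden=128):
        super().__init__()
        self.lstm = nn.LSTM(n_features, hidden, num_layers=2,
                            batch_first=True, bidirectional=True)
        self.head = nn.Linear(2 * hidden, 1)

    def forward(self, x):                   # x: (batch, time, features)
        out, _ = self.lstm(x)
        return self.head(out).squeeze(-1)   # (batch, time) pressures

model = PressureLSTM()
x = torch.randn(8, 80, 4)                   # a batch of 80-step breaths
pred = model(x)
target = torch.randn(8, 80)
mae = (pred - target).abs().mean()          # the paper's evaluation metric
```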
arxiv-667369
2410.06553
DCP: Learning Accelerator Dataflow for Neural Network via Propagation
<|reference_start|>DCP: Learning Accelerator Dataflow for Neural Network via Propagation: Deep neural network (DNN) hardware (HW) accelerators have achieved great success in improving DNNs' performance and efficiency. One key reason is dataflow in executing a DNN layer, including on-chip data partitioning, computation parallelism, and scheduling policy, which have large impacts on latency and energy consumption. Unlike prior works that required considerable efforts from HW engineers to design suitable dataflows for different DNNs, this work proposes an efficient data-centric approach, named Dataflow Code Propagation (DCP), to automatically find the optimal dataflow for DNN layers in seconds without human effort. It has several attractive benefits that prior arts do not have. (i) We translate the HW dataflow configuration into a code representation in a unified dataflow coding space, which can be optimized by backpropagating gradients given a DNN layer or network. (ii) DCP learns a neural predictor to efficiently update the dataflow codes towards the desired gradient directions to minimize various optimization objectives e.g., latency and energy. (iii) It can be easily generalized to unseen HW configurations in a zero-shot or few-shot learning manner. For example, without using additional training data, DCP surpasses the GAMMA method that performs a full search using thousands of samples. Extensive experiments on several representative models such as MobileNet, ResNet, and ViT show that DCP outperforms its counterparts in various settings.<|reference_end|>
arxiv
@article{xu2024dcp:, title={DCP: Learning Accelerator Dataflow for Neural Network via Propagation}, author={Peng Xu, Wenqi Shao, Mingyu Ding, Ping Luo}, journal={arXiv preprint arXiv:2410.06553}, year={2024}, archivePrefix={arXiv}, eprint={2410.06553}, primaryClass={cs.LG eess.IV} }
xu2024dcp:
arxiv-667370
2410.06554
The Accuracy Paradox in RLHF: When Better Reward Models Don't Yield Better Language Models
<|reference_start|>The Accuracy Paradox in RLHF: When Better Reward Models Don't Yield Better Language Models: Reinforcement Learning from Human Feedback significantly enhances Natural Language Processing by aligning language models with human expectations. A critical factor in this alignment is the strength of reward models used during training. This study explores whether stronger reward models invariably lead to better language models. In this paper, through experiments on relevance, factuality, and completeness tasks using the QA-FEEDBACK dataset and reward models based on Longformer, we uncover a surprising paradox: language models trained with moderately accurate reward models outperform those guided by highly accurate ones. This challenges the widely held belief that stronger reward models always lead to better language models, and opens up new avenues for future research into the key factors driving model performance and how to choose the most suitable reward models. Code and additional details are available at [https://github.com/EIT-NLP/AccuracyParadox-RLHF](https://github.com/EIT-NLP/AccuracyParadox-RLHF).<|reference_end|>
arxiv
@article{chen2024the, title={The Accuracy Paradox in RLHF: When Better Reward Models Don't Yield Better Language Models}, author={Yanjun Chen, Dawei Zhu, Yirong Sun, Xinghao Chen, Wei Zhang, Xiaoyu Shen}, journal={arXiv preprint arXiv:2410.06554}, year={2024}, archivePrefix={arXiv}, eprint={2410.06554}, primaryClass={cs.CL cs.AI} }
chen2024the
arxiv-667371
2410.06555
ING-VP: MLLMs cannot Play Easy Vision-based Games Yet
<|reference_start|>ING-VP: MLLMs cannot Play Easy Vision-based Games Yet: As multimodal large language models (MLLMs) continue to demonstrate increasingly competitive performance across a broad spectrum of tasks, more intricate and comprehensive benchmarks have been developed to assess these cutting-edge models. These benchmarks introduce new challenges to core capabilities such as perception, reasoning, and planning. However, existing multimodal benchmarks fall short in providing a focused evaluation of multi-step planning based on spatial relationships in images. To bridge this gap, we present ING-VP, the first INteractive Game-based Vision Planning benchmark, specifically designed to evaluate the spatial imagination and multi-step reasoning abilities of MLLMs. ING-VP features 6 distinct games, encompassing 300 levels, each with 6 unique configurations. A single model engages in over 60,000 rounds of interaction. The benchmark framework allows for multiple comparison settings, including image-text vs. text-only inputs, single-step vs. multi-step reasoning, and with-history vs. without-history conditions, offering valuable insights into the model's capabilities. We evaluated numerous state-of-the-art MLLMs, with the highest-performing model, Claude-3.5 Sonnet, achieving an average accuracy of only 3.37%, far below the anticipated standard. This work aims to provide a specialized evaluation framework to drive advancements in MLLMs' capacity for complex spatial reasoning and planning. The code is publicly available at https://github.com/Thisisus7/ING-VP.git.<|reference_end|>
arxiv
@article{zhang2024ing-vp:, title={ING-VP: MLLMs cannot Play Easy Vision-based Games Yet}, author={Haoran Zhang, Hangyu Guo, Shuyue Guo, Meng Cao, Wenhao Huang, Jiaheng Liu, Ge Zhang}, journal={arXiv preprint arXiv:2410.06555}, year={2024}, archivePrefix={arXiv}, eprint={2410.06555}, primaryClass={cs.CL} }
zhang2024ing-vp:
arxiv-667372
2410.06556
MPC-guided, Data-driven Fuzzy Controller Synthesis
<|reference_start|>MPC-guided, Data-driven Fuzzy Controller Synthesis: Model predictive control (MPC) is a powerful control technique for online optimization using system model-based predictions over a finite time horizon. However, the computational cost of MPC can be prohibitive in resource-constrained computer systems. This paper presents a fuzzy controller synthesis framework guided by MPC. In the proposed framework, training data is obtained from MPC closed-loop simulations and is used to optimize a low-computational-complexity controller to emulate the response of MPC. In particular, autoregressive moving average (ARMA) controllers are trained using data obtained from MPC closed-loop simulations, such that each ARMA controller emulates the response of the MPC controller under particular desired conditions. Using a Takagi-Sugeno (T-S) fuzzy system, the responses of all the trained ARMA controllers are then weighted depending on the measured system conditions, resulting in the Fuzzy-Autoregressive Moving Average (F-ARMA) controller. The effectiveness of the trained F-ARMA controllers is illustrated via numerical examples.<|reference_end|>
arxiv
@article{salazar2024mpc-guided, title={MPC-guided, Data-driven Fuzzy Controller Synthesis}, author={Juan Augusto Paredes Salazar, Ankit Goel}, journal={arXiv preprint arXiv:2410.06556}, year={2024}, archivePrefix={arXiv}, eprint={2410.06556}, primaryClass={eess.SY cs.SY} }
salazar2024mpc-guided
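A compact sketch of the F-ARMA idea from the entry above: several ARMA controllers, each assumed to have been fitted offline to MPC closed-loop data, blended at runtime by normalized Takagi-Sugeno membership weights. The fitting step and the membership functions are omitted; all names are illustrative.

```python
import numpy as np

class ARMAController:
    """u_k = sum_i a_i * e_{k-i} + sum_j b_j * u_{k-j-1}."""
    def __init__(self, a, b):
        self.a, self.b = np.asarray(a, float), np.asarray(b, float)
        self.e_hist = np.zeros(len(self.a))   # recent tracking errors
        self.u_hist = np.zeros(len(self.b))   # recent control outputs

    def step(self, error):
        self.e_hist = np.roll(self.e_hist, 1)
        self.e_hist[0] = error
        u = self.a @ self.e_hist + self.b @ self.u_hist
        self.u_hist = np.roll(self.u_hist, 1)
        self.u_hist[0] = u
        return u

def f_arma_output(controllers, memberships, error):
    # Takagi-Sugeno blend: normalized membership-weighted local outputs.
    w = np.asarray(memberships, float)
    w = w / (w.sum() + 1e-12)
    return float(sum(wi * c.step(error) for wi, c in zip(w, controllers)))

ctrls = [ARMAController([0.8, 0.2], [0.1]), ARMAController([1.2, -0.3], [0.05])]
u = f_arma_output(ctrls, memberships=[0.7, 0.3], error=0.5)
```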
arxiv-667373
2410.06558
Deep Correlated Prompting for Visual Recognition with Missing Modalities
<|reference_start|>Deep Correlated Prompting for Visual Recognition with Missing Modalities: Large-scale multimodal models have shown excellent performance over a series of tasks, powered by the large corpus of paired multimodal training data. Generally, they are assumed to receive modality-complete inputs. However, this simple assumption may not always hold in the real world due to privacy constraints or collection difficulty, and models pretrained on modality-complete data easily demonstrate degraded performance on missing-modality cases. To handle this issue, we turn to prompt learning to adapt large pretrained multimodal models to missing-modality scenarios by regarding different missing cases as different types of input. Instead of only prepending independent prompts to the intermediate layers, we propose to leverage the correlations between prompts and input features and excavate the relationships between different layers of prompts to carefully design the instructions. We also incorporate the complementary semantics of different modalities to guide the prompting design for each modality. Extensive experiments on three commonly-used datasets consistently demonstrate the superiority of our method compared to previous approaches under different missing scenarios. Comprehensive ablations are further provided to show the generalizability and reliability of our method under different modality-missing ratios and types.<|reference_end|>
arxiv
@article{hu2024deep, title={Deep Correlated Prompting for Visual Recognition with Missing Modalities}, author={Lianyu Hu, Tongkai Shi, Wei Feng, Fanhua Shang, Liang Wan}, journal={arXiv preprint arXiv:2410.06558}, year={2024}, archivePrefix={arXiv}, eprint={2410.06558}, primaryClass={cs.CV} }
hu2024deep
arxiv-667374
2410.06560
Mitigating Time Discretization Challenges with WeatherODE: A Sandwich Physics-Driven Neural ODE for Weather Forecasting
<|reference_start|>Mitigating Time Discretization Challenges with WeatherODE: A Sandwich Physics-Driven Neural ODE for Weather Forecasting: In the field of weather forecasting, traditional models often grapple with discretization errors and time-dependent source discrepancies, which limit their predictive performance. In this paper, we present WeatherODE, a novel one-stage, physics-driven ordinary differential equation (ODE) model designed to enhance weather forecasting accuracy. By leveraging wave equation theory and integrating a time-dependent source model, WeatherODE effectively addresses the challenges associated with time-discretization error and dynamic atmospheric processes. Moreover, we design a CNN-ViT-CNN sandwich structure, facilitating efficient learning dynamics tailored for distinct yet interrelated tasks with varying optimization biases in advection equation estimation. Through rigorous experiments, WeatherODE demonstrates superior performance in both global and regional weather forecasting tasks, outperforming recent state-of-the-art approaches by significant margins of over 40.0\% and 31.8\% in root mean square error (RMSE), respectively. The source code is available at \url{https://github.com/DAMO-DI-ML/WeatherODE}.<|reference_end|>
arxiv
@article{liu2024mitigating, title={Mitigating Time Discretization Challenges with WeatherODE: A Sandwich Physics-Driven Neural ODE for Weather Forecasting}, author={Peiyuan Liu, Tian Zhou, Liang Sun, Rong Jin}, journal={arXiv preprint arXiv:2410.06560}, year={2024}, archivePrefix={arXiv}, eprint={2410.06560}, primaryClass={cs.LG cs.AI} }
liu2024mitigating
arxiv-667375
2410.06561
Efficient and Robust Knowledge Distillation from A Stronger Teacher Based on Correlation Matching
<|reference_start|>Efficient and Robust Knowledge Distillation from A Stronger Teacher Based on Correlation Matching: Knowledge Distillation (KD) has emerged as a pivotal technique for neural network compression and performance enhancement. Most KD methods aim to transfer dark knowledge from a cumbersome teacher model to a lightweight student model based on Kullback-Leibler (KL) divergence loss. However, the student performance improvements achieved through KD exhibit diminishing marginal returns, where a stronger teacher model does not necessarily lead to a proportionally stronger student model. To address this issue, we empirically find that the KL-based KD method may implicitly change the inter-class relationships learned by the student model, resulting in a more complex and ambiguous decision boundary, which in turn reduces the model's accuracy and generalization ability. Therefore, this study argues that the student model should learn not only the probability values from the teacher's output but also the relative ranking of classes, and proposes a novel Correlation Matching Knowledge Distillation (CMKD) method that combines Pearson and Spearman correlation coefficient-based KD losses to achieve more efficient and robust distillation from a stronger teacher model. Moreover, considering that samples vary in difficulty, CMKD dynamically adjusts the weights of the Pearson-based loss and the Spearman-based loss. CMKD is simple yet practical, and extensive experiments demonstrate that it consistently achieves state-of-the-art performance on CIFAR-100 and ImageNet, and adapts well to various teacher architectures, sizes, and other KD methods.<|reference_end|>
arxiv
@article{niu2024efficient, title={Efficient and Robust Knowledge Distillation from A Stronger Teacher Based on Correlation Matching}, author={Wenqi Niu, Yingchao Wang, Guohui Cai and Hanpo Hou}, journal={arXiv preprint arXiv:2410.06561}, year={2024}, archivePrefix={arXiv}, eprint={2410.06561}, primaryClass={cs.LG cs.AI} }
niu2024efficient
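A sketch of a correlation-matching KD loss in the spirit of the CMKD entry above: per-sample Pearson correlation on logits, plus Spearman approximated as Pearson on differentiable soft ranks, mixed by a per-sample weight standing in for the paper's difficulty-based dynamic weighting. The soft-rank approximation and the mixing form are assumptions.

```python
import torch

def pearson(x, y, eps=1e-8):
    # Per-row Pearson correlation between two (batch, classes) tensors.
    x = x - x.mean(dim=-1, keepdim=True)
    y = y - y.mean(dim=-1, keepdim=True)
    return (x * y).sum(-1) / (x.norm(dim=-1) * y.norm(dim=-1) + eps)

def soft_rank(x, tau=1.0):
    # Differentiable rank: rank_i ~ count of entries smaller than x_i.
    return torch.sigmoid((x.unsqueeze(-1) - x.unsqueeze(-2)) / tau).sum(-1)

def cmkd_loss(student_logits, teacher_logits, w):
    l_pearson = 1 - pearson(student_logits, teacher_logits)
    l_spearman = 1 - pearson(soft_rank(student_logits),
                             soft_rank(teacher_logits))
    return (w * l_pearson + (1 - w) * l_spearman).mean()

s = torch.randn(16, 100, requires_grad=True)     # student logits
t = torch.randn(16, 100)                         # teacher logits
cmkd_loss(s, t, w=torch.full((16,), 0.7)).backward()
```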
arxiv-667376
2410.06565
Agile Mobility with Rapid Online Adaptation via Meta-learning and Uncertainty-aware MPPI
<|reference_start|>Agile Mobility with Rapid Online Adaptation via Meta-learning and Uncertainty-aware MPPI: Modern non-linear model-based controllers require an accurate physics model and model parameters to be able to control mobile robots at their limits. Also, due to surface slipping at high speeds, the friction parameters may continually change (like tire degradation in autonomous racing), and the controller may need to adapt rapidly. Many works derive a task-specific robot model with a parameter adaptation scheme that works well for the task but requires a lot of effort and tuning for each platform and task. In this work, we design a full model-learning-based controller based on meta pre-training that can very quickly adapt using few-shot dynamics data to any wheel-based robot with any model parameters, while also reasoning about model uncertainty. We demonstrate our results in small-scale numeric simulation, the large-scale Unity simulator, and on a medium-scale hardware platform with a wide range of settings. We show that our results are comparable to domain-specific well-engineered controllers, and have excellent generalization performance across all scenarios.<|reference_end|>
arxiv
@article{kalaria2024agile, title={Agile Mobility with Rapid Online Adaptation via Meta-learning and Uncertainty-aware MPPI}, author={Dvij Kalaria, Haoru Xue, Wenli Xiao, Tony Tao, Guanya Shi, and John M. Dolan}, journal={arXiv preprint arXiv:2410.06565}, year={2024}, archivePrefix={arXiv}, eprint={2410.06565}, primaryClass={cs.RO} }
kalaria2024agile
arxiv-667377
2410.06566
Detecting Bias and Enhancing Diagnostic Accuracy in Large Language Models for Healthcare
<|reference_start|>Detecting Bias and Enhancing Diagnostic Accuracy in Large Language Models for Healthcare: Biased AI-generated medical advice and misdiagnoses can jeopardize patient safety, making the integrity of AI in healthcare more critical than ever. As Large Language Models (LLMs) take on a growing role in medical decision-making, addressing their biases and enhancing their accuracy is key to delivering safe, reliable care. This study addresses these challenges head-on by introducing new resources designed to promote ethical and precise AI in healthcare. We present two datasets: BiasMD, featuring 6,007 question-answer pairs crafted to evaluate and mitigate biases in health-related LLM outputs, and DiseaseMatcher, with 32,000 clinical question-answer pairs spanning 700 diseases, aimed at assessing symptom-based diagnostic accuracy. Using these datasets, we developed the EthiClinician, a fine-tuned model built on the ChatDoctor framework, which outperforms GPT-4 in both ethical reasoning and clinical judgment. By exposing and correcting hidden biases in existing models for healthcare, our work sets a new benchmark for safer, more reliable patient outcomes.<|reference_end|>
arxiv
@article{zahraei2024detecting, title={Detecting Bias and Enhancing Diagnostic Accuracy in Large Language Models for Healthcare}, author={Pardis Sadat Zahraei and Zahra Shakeri}, journal={arXiv preprint arXiv:2410.06566}, year={2024}, archivePrefix={arXiv}, eprint={2410.06566}, primaryClass={cs.CL} }
zahraei2024detecting
arxiv-667378
2410.06567
Convex Distillation: Efficient Compression of Deep Networks via Convex Optimization
<|reference_start|>Convex Distillation: Efficient Compression of Deep Networks via Convex Optimization: Deploying large and complex deep neural networks on resource-constrained edge devices poses significant challenges due to their computational demands and the complexities of non-convex optimization. Traditional compression methods such as distillation and pruning often retain non-convexity that complicates fine-tuning in real-time on such devices. Moreover, these methods often necessitate extensive end-to-end network fine-tuning after compression to preserve model performance, which is not only time-consuming but also requires fully annotated datasets, thus potentially negating the benefits of efficient network compression. In this paper, we introduce a novel distillation technique that efficiently compresses the model via convex optimization -- eliminating intermediate non-convex activation functions and using only intermediate activations from the original model. Our approach enables distillation in a label-free data setting and achieves performance comparable to the original model without requiring any post-compression fine-tuning. We demonstrate the effectiveness of our method for image classification models on multiple standard datasets, and further show that in the data limited regime, our method can outperform standard non-convex distillation approaches. Our method promises significant advantages for deploying high-efficiency, low-footprint models on edge devices, making it a practical choice for real-world applications. We show that convex neural networks, when provided with rich feature representations from a large pre-trained non-convex model, can achieve performance comparable to their non-convex counterparts, opening up avenues for future research at the intersection of convex optimization and deep learning.<|reference_end|>
arxiv
@article{varshney2024convex, title={Convex Distillation: Efficient Compression of Deep Networks via Convex Optimization}, author={Prateek Varshney and Mert Pilanci}, journal={arXiv preprint arXiv:2410.06567}, year={2024}, archivePrefix={arXiv}, eprint={2410.06567}, primaryClass={cs.LG} }
varshney2024convex
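For the convex-distillation entry above, a deliberately simplified sketch of the label-free convex fitting idea: a closed-form ridge regression maps unlabeled inputs to the teacher's intermediate activations. The paper works with convex reformulations of ReLU networks rather than a plain linear map; this only illustrates that each block-level fit can be a convex problem needing neither labels nor post-compression fine-tuning.

```python
import numpy as np

def ridge_fit(X, H, lam=1e-2):
    """Solve min_W ||X W - H||^2 + lam ||W||^2 in closed form (convex)."""
    d = X.shape[1]
    return np.linalg.solve(X.T @ X + lam * np.eye(d), X.T @ H)

rng = np.random.default_rng(0)
X = rng.normal(size=(1024, 64))                      # unlabeled inputs
H = np.maximum(X @ rng.normal(size=(64, 32)), 0.0)   # teacher block activations
W = ridge_fit(X, H)                                  # distilled linear block
print(np.mean((X @ W - H) ** 2))                     # fit quality, no labels used
```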
arxiv-667379
2410.06570
Disturbance Observer-based Control Barrier Functions with Residual Model Learning for Safe Reinforcement Learning
<|reference_start|>Disturbance Observer-based Control Barrier Functions with Residual Model Learning for Safe Reinforcement Learning: Reinforcement learning (RL) agents need to explore their environment to learn optimal behaviors and achieve maximum rewards. However, exploration can be risky when training RL directly on real systems, while simulation-based training introduces the tricky issue of the sim-to-real gap. Recent approaches have leveraged safety filters, such as control barrier functions (CBFs), to penalize unsafe actions during RL training. However, the strong safety guarantees of CBFs rely on a precise dynamic model. In practice, uncertainties always exist, including internal disturbances from the errors of dynamics and external disturbances such as wind. In this work, we propose a new safe RL framework based on disturbance rejection-guarded learning, which allows for an almost model-free RL with an assumed but not necessarily precise nominal dynamic model. We demonstrate our results on the Safety-gym benchmark for Point and Car robots on all tasks where we can outperform state-of-the-art approaches that use only residual model learning or a disturbance observer (DOB). We further validate the efficacy of our framework using a physical F1/10 racing car. Videos: https://sites.google.com/view/res-dob-cbf-rl<|reference_end|>
arxiv
@article{kalaria2024disturbance, title={Disturbance Observer-based Control Barrier Functions with Residual Model Learning for Safe Reinforcement Learning}, author={Dvij Kalaria, Qin Lin and John M. Dolan}, journal={arXiv preprint arXiv:2410.06570}, year={2024}, archivePrefix={arXiv}, eprint={2410.06570}, primaryClass={cs.RO} }
kalaria2024disturbance
arxiv-667380
2410.06572
Can DeepFake Speech be Reliably Detected?
<|reference_start|>Can DeepFake Speech be Reliably Detected?: Recent advances in text-to-speech (TTS) systems, particularly those with voice cloning capabilities, have made voice impersonation readily accessible, raising ethical and legal concerns due to potential misuse for malicious activities like misinformation campaigns and fraud. While synthetic speech detectors (SSDs) exist to combat this, they are vulnerable to ``test domain shift'', exhibiting decreased performance when audio is altered through transcoding, playback, or background noise. This vulnerability is further exacerbated by deliberate manipulation of synthetic speech aimed at deceiving detectors. This work presents the first systematic study of such active malicious attacks against state-of-the-art open-source SSDs. White-box attacks, black-box attacks, and their transferability are studied in terms of both attack effectiveness and stealthiness, using both hardcoded metrics and human ratings. The results highlight the urgent need for more robust detection methods in the face of evolving adversarial threats.<|reference_end|>
arxiv
@article{liu2024can, title={Can DeepFake Speech be Reliably Detected?}, author={Hongbin Liu, Youzheng Chen, Arun Narayanan, Athula Balachandran, Pedro J. Moreno, Lun Wang}, journal={arXiv preprint arXiv:2410.06572}, year={2024}, archivePrefix={arXiv}, eprint={2410.06572}, primaryClass={cs.SD cs.CR cs.LG} }
liu2024can
arxiv-667381
2410.06576
On The Relationship between Visual Anomaly-free and Anomalous Representations
<|reference_start|>On The Relationship between Visual Anomaly-free and Anomalous Representations: Anomaly detection is an important problem within computer vision, with a variety of real-life applications. Yet, the current set of solutions to this problem entails known, systematic shortcomings. Specifically, the contemporary surface anomaly detection task assumes the presence of multiple specific anomaly classes, e.g. cracks, rusting, etc., unlike the one-class classification model of the past. However, building a deep learning model in such a setup remains a challenge because anomalies arise rarely, and hence anomaly samples are quite scarce. Transfer learning has been a preferred paradigm in such situations. But the typical source domains with large dataset sizes, e.g. ImageNet, JFT-300M, LAION-2B, do not correlate well with the domain of surfaces and materials, an important premise of transfer learning. In this paper, we make an important hypothesis and show, by exhaustive experimentation, that the space of anomaly-free visual patterns of the normal samples correlates well with each of the various spaces of anomalous patterns of the class-specific anomaly samples. The first results of using this hypothesis in transfer learning have indeed been quite encouraging. We expect that finding such a simple nearby domain, which readily provides a large number of samples and often shows inter-class separability though with narrow margins, will be a useful discovery. In particular, it is expected to improve domain adaptation and few-shot learning for anomaly detection, making in-the-wild anomaly detection realistically possible in the future.<|reference_end|>
arxiv
@article{sadrani2024on, title={On The Relationship between Visual Anomaly-free and Anomalous Representations}, author={Riya Sadrani, Hrishikesh Sharma and Ayush Bachan}, journal={arXiv preprint arXiv:2410.06576}, year={2024}, archivePrefix={arXiv}, eprint={2410.06576}, primaryClass={cs.CV} }
sadrani2024on
arxiv-667382
2410.06577
Rodimus*: Breaking the Accuracy-Efficiency Trade-Off with Efficient Attentions
<|reference_start|>Rodimus*: Breaking the Accuracy-Efficiency Trade-Off with Efficient Attentions: Recent advancements in Transformer-based large language models (LLMs) have set new standards in natural language processing. However, the classical softmax attention incurs significant computational costs, leading to $O(T)$ complexity for per-token generation, where $T$ represents the context length. This work explores reducing LLMs' complexity while maintaining performance by introducing Rodimus and its enhanced version, Rodimus$+$. Rodimus employs an innovative data-dependent tempered selection (DDTS) mechanism within a linear attention-based, purely recurrent framework, achieving significant accuracy while drastically reducing the memory usage typically associated with recurrent models. This method exemplifies semantic compression by maintaining essential input information with fixed-size hidden states. Building on this, Rodimus$+$ combines Rodimus with the innovative Sliding Window Shared-Key Attention (SW-SKA) in a hybrid approach, effectively leveraging the complementary semantic, token, and head compression techniques. Our experiments demonstrate that Rodimus$+$-1.6B, trained on 1 trillion tokens, achieves superior downstream performance against models trained on more tokens, including Qwen2-1.5B and RWKV6-1.6B, underscoring its potential to redefine the accuracy-efficiency balance in LLMs. Model code and pre-trained checkpoints will be available soon.<|reference_end|>
arxiv
@article{he2024rodimus*:, title={Rodimus*: Breaking the Accuracy-Efficiency Trade-Off with Efficient Attentions}, author={Zhihao He, Hang Yu, Zi Gong, Shizhan Liu, Jianguo Li, Weiyao Lin}, journal={arXiv preprint arXiv:2410.06577}, year={2024}, archivePrefix={arXiv}, eprint={2410.06577}, primaryClass={cs.CL} }
he2024rodimus*:
arxiv-667383
2410.06581
Enhancing Legal Case Retrieval via Scaling High-quality Synthetic Query-Candidate Pairs
<|reference_start|>Enhancing Legal Case Retrieval via Scaling High-quality Synthetic Query-Candidate Pairs: Legal case retrieval (LCR) aims to provide similar cases as references for a given fact description. This task is crucial for promoting consistent judgments in similar cases, effectively enhancing judicial fairness and improving work efficiency for judges. However, existing works face two main challenges in real-world applications: they mainly focus on case-to-case retrieval using lengthy queries, which does not match real-world scenarios; and the limited data scale, with current datasets containing only hundreds of queries, is insufficient to satisfy the training requirements of existing data-hungry neural models. To address these issues, we introduce an automated method to construct synthetic query-candidate pairs and build the largest LCR dataset to date, LEAD, which is hundreds of times larger than existing datasets. This data construction method can provide ample training signals for LCR models. Experimental results demonstrate that model training with our constructed data can achieve state-of-the-art results on two widely-used LCR benchmarks. Besides, the construction method can also be applied to civil cases and achieve promising results. The data and codes can be found at https://github.com/thunlp/LEAD.<|reference_end|>
arxiv
@article{gao2024enhancing, title={Enhancing Legal Case Retrieval via Scaling High-quality Synthetic Query-Candidate Pairs}, author={Cheng Gao, Chaojun Xiao, Zhenghao Liu, Huimin Chen, Zhiyuan Liu, Maosong Sun}, journal={arXiv preprint arXiv:2410.06581}, year={2024}, archivePrefix={arXiv}, eprint={2410.06581}, primaryClass={cs.IR} }
gao2024enhancing
arxiv-667384
2410.06583
A short note about the learning-augmented secretary problem
<|reference_start|>A short note about the learning-augmented secretary problem: We consider the secretary problem through the lens of learning-augmented algorithms. As it is known that the best possible expected competitive ratio is $1/e$ in the classic setting without predictions, a natural goal is to design algorithms that are 1-consistent and $1/e$-robust. Unfortunately, [FY24] provided hardness constructions showing that such a goal is not attainable when the candidates' true values are allowed to scale with $n$. Here, we provide a simple and explicit alternative hardness construction showing that such a goal is not achievable even when the candidates' true values are constants that do not scale with $n$.<|reference_end|>
arxiv
@article{choo2024a, title={A short note about the learning-augmented secretary problem}, author={Davin Choo, Chun Kai Ling}, journal={arXiv preprint arXiv:2410.06583}, year={2024}, archivePrefix={arXiv}, eprint={2410.06583}, primaryClass={cs.DS} }
choo2024a
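As background for the note above: the classic, prediction-free secretary rule observes the first n/e candidates and then accepts the first one beating all of them, succeeding with probability about 1/e. A quick simulation of that baseline (the paper's hardness construction itself is not reproduced here):

```python
import numpy as np

def classic_secretary(values):
    # Observe the first n/e candidates, then take the first record-breaker.
    n = len(values)
    k = int(n / np.e)
    threshold = values[:k].max() if k > 0 else -np.inf
    for v in values[k:]:
        if v > threshold:
            return v
    return values[-1]   # forced to take the last candidate

rng = np.random.default_rng(0)
n, trials = 100, 20_000
wins = sum(classic_secretary(rng.permutation(n)) == n - 1 for _ in range(trials))
print(wins / trials)    # roughly 1/e ~ 0.368
```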
arxiv-667385
2410.06584
Two Birds With One Stone: Enhancing Communication and Sensing via Multi-Functional RIS
<|reference_start|>Two Birds With One Stone: Enhancing Communication and Sensing via Multi-Functional RIS: In this article, we propose new network architectures that integrate multi-functional reconfigurable intelligent surfaces (MF-RISs) into 6G networks to enhance both communication and sensing capabilities. Firstly, we elaborate on how to leverage MF-RISs to improve communication performance in different communication modes, including unicast, multicast, and broadcast, and for different multi-access schemes. Next, we emphasize the synergistic benefits of integrating MF-RISs with wireless sensing, enabling more accurate and efficient target detection in 6G networks. Furthermore, we present two schemes that utilize MF-RISs to enhance the performance of integrated sensing and communication (ISAC). We also study multi-objective optimization to achieve the optimal trade-off between communication and sensing performance. Finally, we present numerical results to show the performance improvements offered by MF-RISs compared to conventional RISs in ISAC. We also outline key research directions for MF-RIS toward the ambition of 6G.<|reference_end|>
arxiv
@article{ni2024two, title={Two Birds With One Stone: Enhancing Communication and Sensing via Multi-Functional RIS}, author={Wanli Ni, Wen Wang, Ailing Zheng, Peng Wang, Changsheng You, Yonina C. Eldar, Dusit Niyato, Robert Schober}, journal={arXiv preprint arXiv:2410.06584}, year={2024}, archivePrefix={arXiv}, eprint={2410.06584}, primaryClass={eess.SP cs.SY eess.SY} }
ni2024two
arxiv-667386
2410.06587
Bots can Snoop: Uncovering and Mitigating Privacy Risks of Bots in Group Chats
<|reference_start|>Bots can Snoop: Uncovering and Mitigating Privacy Risks of Bots in Group Chats: New privacy concerns arise with chatbots on group messaging platforms. Chatbots may access information beyond their intended functionalities, such as messages unintended for chatbots or sender's identities. Chatbot operators may exploit such information to infer personal information and link users across groups, potentially leading to personal data breaches, pervasive tracking, and targeted advertising. Our analysis of conversation datasets shows that (1) chatbots often access far more messages than needed, and (2) when a user joins a new group with chatbots, there is a 3.4% chance that at least one of the chatbots can recognize and associate the user with their previous interactions in other groups. Although state-of-the-art group messaging protocols provide robust end-to-end security and some platforms have implemented policies to limit chatbot access, no platforms successfully combine these features. This paper introduces SnoopGuard, a secure group messaging protocol that ensures user privacy against chatbots while maintaining strong end-to-end security. Our method offers selective message access, preventing chatbots from accessing unrelated messages, and ensures sender anonymity within the group. SnoopGuard achieves $O(\log n + m)$ message-sending complexity for a group of $n$ users and $m$ chatbots, compared to $O(\log(n + m))$ in state-of-the-art protocols, with acceptable overhead for enhanced privacy. Our prototype implementation shows that sending a message in a group of 50 users and 10 chatbots takes about 30 milliseconds when integrated with Message Layer Security (MLS).<|reference_end|>
arxiv
@article{chou2024bots, title={Bots can Snoop: Uncovering and Mitigating Privacy Risks of Bots in Group Chats}, author={Kai-Hsiang Chou, Yi-Min Lin, Yi-An Wang, Jonathan Weiping Li, Tiffany Hyun-Jin Kim, Hsu-Chun Hsiao}, journal={arXiv preprint arXiv:2410.06587}, year={2024}, archivePrefix={arXiv}, eprint={2410.06587}, primaryClass={cs.CR} }
chou2024bots
arxiv-667387
2410.06589
A Family of LZ78-based Universal Sequential Probability Assignments
<|reference_start|>A Family of LZ78-based Universal Sequential Probability Assignments: We propose and study a family of universal sequential probability assignments on individual sequences, based on the incremental parsing procedure of the Lempel-Ziv (LZ78) compression algorithm. We show that the normalized log loss under any of these models converges to the normalized LZ78 codelength, uniformly over all individual sequences. To establish the universality of these models, we consolidate a set of results from the literature relating finite-state compressibility to optimal log-loss under Markovian and finite-state models. We also consider some theoretical and computational properties of these models when viewed as probabilistic sources. Finally, we present experimental results showcasing the potential benefit of using this family -- as models and as sources -- for compression, generation, and classification.<|reference_end|>
arxiv
@article{sagan2024a, title={A Family of LZ78-based Universal Sequential Probability Assignments}, author={Naomi Sagan and Tsachy Weissman}, journal={arXiv preprint arXiv:2410.06589}, year={2024}, archivePrefix={arXiv}, eprint={2410.06589}, primaryClass={cs.IT math.IT} }
sagan2024a
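A small sketch of one member of the LZ78-based family described above: next-symbol counts stored along the LZ78 parse tree define a smoothed sequential probability at the current node, and the incremental-parsing walk resets to the root whenever a new phrase (leaf) is created. The smoothing constant gamma and this particular assignment are assumptions about one member of the family.

```python
import math

class LZ78Node:
    def __init__(self, alphabet_size):
        self.children = {}
        self.counts = [0] * alphabet_size   # next-symbol counts at this context

class LZ78SPA:
    def __init__(self, alphabet_size=2, gamma=0.5):
        self.A, self.gamma = alphabet_size, gamma
        self.root = LZ78Node(alphabet_size)
        self.node = self.root               # current position in the parse tree

    def prob(self, symbol):
        # Smoothed probability of `symbol` given the current LZ78 context.
        c = self.node.counts
        return (c[symbol] + self.gamma) / (sum(c) + self.gamma * self.A)

    def update(self, symbol):
        self.node.counts[symbol] += 1
        if symbol in self.node.children:    # phrase continues: walk deeper
            self.node = self.node.children[symbol]
        else:                               # phrase ends: add a leaf, restart
            self.node.children[symbol] = LZ78Node(self.A)
            self.node = self.root

spa = LZ78SPA()
log_loss = 0.0
for s in [0, 1, 1, 0, 1, 1, 0, 1]:
    log_loss -= math.log2(spa.prob(s))      # loss incurred before observing s
    spa.update(s)
print(log_loss / 8)                         # per-symbol log loss in bits
```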
arxiv-667388
2410.06593
Towards Natural Image Matting in the Wild via Real-Scenario Prior
<|reference_start|>Towards Natural Image Matting in the Wild via Real-Scenario Prior: Recent approaches attempt to adapt powerful interactive segmentation models, such as SAM, to interactive matting and fine-tune the models based on synthetic matting datasets. However, models trained on synthetic data fail to generalize to complex and occluded scenes. We address this challenge by proposing a new matting dataset based on the COCO dataset, namely COCO-Matting. Specifically, the construction of our COCO-Matting includes accessory fusion and mask-to-matte, which select complex real-world images from COCO and convert semantic segmentation masks to matting labels. The built COCO-Matting comprises an extensive collection of 38,251 human instance-level alpha mattes in complex natural scenarios. Furthermore, existing SAM-based matting methods extract intermediate features and masks from a frozen SAM and only train a lightweight matting decoder by end-to-end matting losses, which do not fully exploit the potential of the pre-trained SAM. Thus, we propose SEMat, which revamps the network architecture and training objectives. For the network architecture, the proposed feature-aligned transformer learns to extract fine-grained edge and transparency features. The proposed matte-aligned decoder aims to segment matting-specific objects and convert coarse masks into high-precision mattes. For the training objectives, the proposed regularization and trimap losses aim to retain the prior from the pre-trained model and push the matting logits extracted from the mask decoder to contain trimap-based semantic information. Extensive experiments across seven diverse datasets demonstrate the superior performance of our method, proving its efficacy in interactive natural image matting. We open-source our code, models, and dataset at https://github.com/XiaRho/SEMat.<|reference_end|>
arxiv
@article{xia2024towards, title={Towards Natural Image Matting in the Wild via Real-Scenario Prior}, author={Ruihao Xia, Yu Liang, Peng-Tao Jiang, Hao Zhang, Qianru Sun, Yang Tang, Bo Li, Pan Zhou}, journal={arXiv preprint arXiv:2410.06593}, year={2024}, archivePrefix={arXiv}, eprint={2410.06593}, primaryClass={cs.CV} }
xia2024towards
arxiv-667389
2410.06600
DDRN:a Data Distribution Reconstruction Network for Occluded Person Re-Identification
<|reference_start|>DDRN:a Data Distribution Reconstruction Network for Occluded Person Re-Identification: In occluded person re-identification (ReID), severe occlusions lead to a significant amount of irrelevant information that hinders the accurate identification of individuals. These irrelevant cues primarily stem from background interference and occluding interference, adversely affecting the final retrieval results. Traditional discriminative models, which rely on the specific content and positions of the images, often misclassify in cases of occlusion. To address these limitations, we propose the Data Distribution Reconstruction Network (DDRN), a generative model that leverages the data distribution to filter out irrelevant details, enhancing overall feature perception ability and reducing irrelevant feature interference. Additionally, severe occlusions increase the complexity of the feature space. To handle this effectively, we design a multi-center approach through the proposed Hierarchical SubcenterArcface (HS-Arcface) loss function, which can better approximate complex feature spaces. On the Occluded-Duke dataset, we achieved a mAP of 62.4\% (+1.1\%) and a rank-1 accuracy of 71.3\% (+0.6\%), significantly surpassing the latest state-of-the-art method (FRT).<|reference_end|>
arxiv
@article{wang2024ddrn:a, title={DDRN:a Data Distribution Reconstruction Network for Occluded Person Re-Identification}, author={Zhaoyong Wang, Yujie Liu, Mingyue Li, Wenxin Zhang, Zongmin Li}, journal={arXiv preprint arXiv:2410.06600}, year={2024}, archivePrefix={arXiv}, eprint={2410.06600}, primaryClass={cs.CV} }
wang2024ddrn:a
arxiv-667390
2410.06606
Dissecting Fine-Tuning Unlearning in Large Language Models
<|reference_start|>Dissecting Fine-Tuning Unlearning in Large Language Models: Fine-tuning-based unlearning methods prevail for preventing targeted harmful, sensitive, or copyrighted information within large language models while preserving overall capabilities. However, the true effectiveness of these methods is unclear. In this paper, we delve into the limitations of fine-tuning-based unlearning through activation patching and parameter restoration experiments. Our findings reveal that these methods alter the model's knowledge retrieval process, rather than genuinely erasing the problematic knowledge embedded in the model parameters. Furthermore, behavioral tests demonstrate that the unlearning mechanisms inevitably impact the global behavior of the models, affecting unrelated knowledge or capabilities. Our work advocates the development of more resilient unlearning techniques for truly erasing knowledge. Our code is released at https://github.com/yihuaihong/Dissecting-FT-Unlearning.<|reference_end|>
arxiv
@article{hong2024dissecting, title={Dissecting Fine-Tuning Unlearning in Large Language Models}, author={Yihuai Hong, Yuelin Zou, Lijie Hu, Ziqian Zeng, Di Wang, Haiqin Yang}, journal={arXiv preprint arXiv:2410.06606}, year={2024}, archivePrefix={arXiv}, eprint={2410.06606}, primaryClass={cs.CL cs.LG} }
hong2024dissecting
arxiv-667391
2410.06608
Bahasa Harmony: A Comprehensive Dataset for Bahasa Text-to-Speech Synthesis with Discrete Codec Modeling of EnGen-TTS
<|reference_start|>Bahasa Harmony: A Comprehensive Dataset for Bahasa Text-to-Speech Synthesis with Discrete Codec Modeling of EnGen-TTS: This research introduces a comprehensive Bahasa text-to-speech (TTS) dataset and a novel TTS model, EnGen-TTS, designed to enhance the quality and versatility of synthetic speech in the Bahasa language. The dataset, spanning ~55.0 hours and 52K audio recordings, integrates diverse textual sources, ensuring linguistic richness. A meticulous recording setup captures the nuances of Bahasa phonetics, employing professional equipment to ensure high-fidelity audio samples. Statistical analysis reveals the dataset's scale and diversity, laying the foundation for model training and evaluation. The proposed EnGen-TTS model performs better than established baselines, achieving a Mean Opinion Score (MOS) of 4.45 $\pm$ 0.13. Additionally, our investigation on real-time factor and model size highlights EnGen-TTS as a compelling choice, with efficient performance. This research marks a significant advancement in Bahasa TTS technology, with implications for diverse language applications. Link to Generated Samples: \url{https://bahasa-harmony-comp.vercel.app/}<|reference_end|>
arxiv
@article{susladkar2024bahasa, title={Bahasa Harmony: A Comprehensive Dataset for Bahasa Text-to-Speech Synthesis with Discrete Codec Modeling of EnGen-TTS}, author={Onkar Kishor Susladkar, Vishesh Tripathi, Biddwan Ahmed}, journal={EMNLP 2024}, year={2024}, archivePrefix={arXiv}, eprint={2410.06608}, primaryClass={cs.SD cs.AI eess.AS} }
susladkar2024bahasa
arxiv-667392
2410.06613
ES-Gaussian: Gaussian Splatting Mapping via Error Space-Based Gaussian Completion
<|reference_start|>ES-Gaussian: Gaussian Splatting Mapping via Error Space-Based Gaussian Completion: Accurate and affordable indoor 3D reconstruction is critical for effective robot navigation and interaction. Traditional LiDAR-based mapping provides high precision but is costly, heavy, and power-intensive, with limited ability for novel view rendering. Vision-based mapping, while cost-effective and capable of capturing visual data, often struggles with high-quality 3D reconstruction due to sparse point clouds. We propose ES-Gaussian, an end-to-end system using a low-altitude camera and single-line LiDAR for high-quality 3D indoor reconstruction. Our system features Visual Error Construction (VEC) to enhance sparse point clouds by identifying and correcting areas with insufficient geometric detail from 2D error maps. Additionally, we introduce a novel 3DGS initialization method guided by single-line LiDAR, overcoming the limitations of traditional multi-view setups and enabling effective reconstruction in resource-constrained environments. Extensive experimental results on our new Dreame-SR dataset and a publicly available dataset demonstrate that ES-Gaussian outperforms existing methods, particularly in challenging scenarios. The project page is available at https://chenlu-china.github.io/ES-Gaussian/.<|reference_end|>
arxiv
@article{chen2024es-gaussian:, title={ES-Gaussian: Gaussian Splatting Mapping via Error Space-Based Gaussian Completion}, author={Lu Chen, Yingfu Zeng, Haoang Li, Zhitao Deng, Jiafu Yan, Zhenjun Zhao}, journal={arXiv preprint arXiv:2410.06613}, year={2024}, archivePrefix={arXiv}, eprint={2410.06613}, primaryClass={cs.CV cs.RO} }
chen2024es-gaussian:
arxiv-667393
2410.06614
Pair-VPR: Place-Aware Pre-training and Contrastive Pair Classification for Visual Place Recognition with Vision Transformers
<|reference_start|>Pair-VPR: Place-Aware Pre-training and Contrastive Pair Classification for Visual Place Recognition with Vision Transformers: In this work we propose a novel joint training method for Visual Place Recognition (VPR), which simultaneously learns a global descriptor and a pair classifier for re-ranking. The pair classifier can predict whether a given pair of images are from the same place or not. The network comprises only Vision Transformer components for both the encoder and the pair classifier, and both components are trained using their respective class tokens. In existing VPR methods, the network is typically initialized using pre-trained weights from a generic image dataset such as ImageNet. In this work we propose an alternative pre-training strategy, using Siamese Masked Image Modelling as a pre-training task. We propose a place-aware image sampling procedure from a collection of large VPR datasets for pre-training our model, to learn visual features tuned specifically for VPR. By re-using the Masked Image Modelling encoder and decoder weights in the second stage of training, Pair-VPR can achieve state-of-the-art VPR performance across five benchmark datasets with a ViT-B encoder, along with further improvements in localization recall with larger encoders. The Pair-VPR website is: https://csiro-robotics.github.io/Pair-VPR.<|reference_end|>
arxiv
@article{hausler2024pair-vpr:, title={Pair-VPR: Place-Aware Pre-training and Contrastive Pair Classification for Visual Place Recognition with Vision Transformers}, author={Stephen Hausler and Peyman Moghadam}, journal={arXiv preprint arXiv:2410.06614}, year={2024}, archivePrefix={arXiv}, eprint={2410.06614}, primaryClass={cs.RO cs.AI cs.CV} }
hausler2024pair-vpr:
arxiv-667394
2410.06615
$\beta$-calibration of Language Model Confidence Scores for Generative QA
<|reference_start|>$\beta$-calibration of Language Model Confidence Scores for Generative QA: To use generative question-and-answering (QA) systems for decision-making and in any critical application, these systems need to provide well-calibrated confidence scores that reflect the correctness of their answers. Existing calibration methods aim to ensure that the confidence score is on average indicative of the likelihood that the answer is correct. We argue, however, that this standard (average-case) notion of calibration is difficult to interpret for decision-making in generative QA. To address this, we generalize the standard notion of average calibration and introduce $\beta$-calibration, which ensures calibration holds across different question-and-answer groups. We then propose discretized posthoc calibration schemes for achieving $\beta$-calibration.<|reference_end|>
arxiv
@article{manggala2024beta-calibration, title={$\beta$-calibration of Language Model Confidence Scores for Generative QA}, author={Putra Manggala, Atalanti Mastakouri, Elke Kirschbaum, Shiva Prasad Kasiviswanathan, Aaditya Ramdas}, journal={arXiv preprint arXiv:2410.06615}, year={2024}, archivePrefix={arXiv}, eprint={2410.06615}, primaryClass={cs.CL cs.LG} }
manggala2024beta-calibration
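A minimal sketch of one discretized posthoc calibration scheme in the spirit of the abstract: within each question-and-answer group, bin the raw confidences and map each score to its bin's empirical accuracy, so calibration holds per group rather than only on average. The grouping, bin count, and toy data are illustrative assumptions, not the paper's exact scheme.

```python
# Hypothetical group-wise histogram-binning calibrator: replace each raw
# confidence with the empirical accuracy of its (group, bin) cell. Groups,
# bin count, and toy data are assumed for illustration.
import numpy as np

def groupwise_bin_calibrate(conf, correct, groups, n_bins=10):
    conf, correct, groups = map(np.asarray, (conf, correct, groups))
    calibrated = np.empty(len(conf), dtype=float)
    edges = np.linspace(0.0, 1.0, n_bins + 1)
    for g in np.unique(groups):
        idx = groups == g
        bins = np.clip(np.digitize(conf[idx], edges) - 1, 0, n_bins - 1)
        out = conf[idx].astype(float)
        for b in np.unique(bins):
            out[bins == b] = correct[idx][bins == b].mean()  # empirical accuracy
        calibrated[idx] = out
    return calibrated

conf = [0.9, 0.8, 0.95, 0.2, 0.3, 0.25]
correct = [1, 0, 1, 0, 1, 0]
groups = ["science", "science", "science", "trivia", "trivia", "trivia"]
print(groupwise_bin_calibrate(conf, correct, groups, n_bins=2))
```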
arxiv-667395
2410.06617
Learning Evolving Tools for Large Language Models
<|reference_start|>Learning Evolving Tools for Large Language Models: Tool learning enables large language models (LLMs) to interact with external tools and APIs, greatly expanding the application scope of LLMs. However, due to the dynamic nature of external environments, these tools and APIs may become outdated over time, preventing LLMs from correctly invoking them. Existing research primarily focuses on static environments and overlooks this issue, limiting the adaptability of LLMs in real-world applications. In this paper, we propose ToolEVO, a novel framework designed to enhance the adaptive and reflective capabilities of LLMs in the face of tool variability. By leveraging Monte Carlo Tree Search, ToolEVO facilitates active exploration and interaction of LLMs within dynamic environments, allowing for autonomous self-reflection and self-updating of tool usage based on environmental feedback. Additionally, we introduce ToolQA-D, a benchmark specifically designed to evaluate the impact of tool variability. Extensive experiments demonstrate the effectiveness and stability of our approach, highlighting the importance of adaptability to tool variability for effective tool learning.<|reference_end|>
arxiv
@article{chen2024learning, title={Learning Evolving Tools for Large Language Models}, author={Guoxin Chen, Zhong Zhang, Xin Cong, Fangda Guo, Yesai Wu, Yankai Lin, Wenzheng Feng, Yasheng Wang}, journal={arXiv preprint arXiv:2410.06617}, year={2024}, archivePrefix={arXiv}, eprint={2410.06617}, primaryClass={cs.CL cs.AI} }
chen2024learning
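The abstract credits Monte Carlo Tree Search for ToolEVO's exploration. A full MCTS is too long to sketch here, so the snippet below illustrates only its core selection rule, UCB1, on a toy problem of discovering which tool variant still works after an API change; the tools, success probabilities, and reward are placeholders, not the paper's environment.

```python
# Hypothetical UCB1 loop (the selection rule inside MCTS) on a toy tool-
# discovery task: repeatedly pick the tool with the best optimism-adjusted
# value estimate. Tools and rewards are stand-ins.
import math
import random

tools = {"search_v1": 0.1, "search_v2": 0.8, "search_v3": 0.4}  # success probs
counts = {t: 0 for t in tools}
values = {t: 0.0 for t in tools}

def ucb1(tool: str, total: int, c: float = 1.4) -> float:
    if counts[tool] == 0:
        return float("inf")                   # try every tool at least once
    mean = values[tool] / counts[tool]
    return mean + c * math.sqrt(math.log(total) / counts[tool])

random.seed(0)
for step in range(1, 201):
    tool = max(tools, key=lambda t: ucb1(t, step))
    reward = 1.0 if random.random() < tools[tool] else 0.0  # simulated call
    counts[tool] += 1
    values[tool] += reward

print(max(counts, key=counts.get))  # converges on the still-working variant
```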
arxiv-667396
2410.06618
Decomposing Relationship from 1-to-N into N 1-to-1 for Text-Video Retrieval
<|reference_start|>Decomposing Relationship from 1-to-N into N 1-to-1 for Text-Video Retrieval: Text-video retrieval (TVR) has seen substantial advancements in recent years, fueled by the use of pre-trained models and large language models (LLMs). Despite these advancements, achieving accurate matching in TVR remains challenging due to inherent disparities between the video and textual modalities and irregularities in data representation. In this paper, we propose Text-Video-ProxyNet (TV-ProxyNet), a novel framework designed to decompose the conventional 1-to-N relationship of TVR into N distinct 1-to-1 relationships. By replacing a single text query with a series of text proxies, TV-ProxyNet not only broadens the query scope but also achieves a more precise expansion. Each text proxy is crafted through a refined iterative process, controlled by mechanisms we term the director and the dash, which regulate the proxy's direction and distance relative to the original text query. This setup not only facilitates more precise semantic alignment but also effectively manages the disparities and noise inherent in multimodal data. Our experiments on three representative video-text retrieval benchmarks, MSRVTT, DiDeMo, and ActivityNet Captions, demonstrate the effectiveness of TV-ProxyNet. The results show an improvement of 2.0% to 3.3% in R@1 over the baseline. TV-ProxyNet achieved state-of-the-art performance on MSRVTT and ActivityNet Captions, and a 2.0% improvement on DiDeMo compared to existing methods, validating our approach's ability to enhance semantic mapping and reduce error propensity.<|reference_end|>
arxiv
@article{xiao2024decomposing, title={Decomposing Relationship from 1-to-N into N 1-to-1 for Text-Video Retrieval}, author={Jian Xiao, Zhenzhen Hu, Jia Li, Richang Hong}, journal={arXiv preprint arXiv:2410.06618}, year={2024}, archivePrefix={arXiv}, eprint={2410.06618}, primaryClass={cs.CV cs.IR cs.MM} }
xiao2024decomposing
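A minimal sketch of the proxy construction the abstract describes: replace one text query embedding with N proxies, each offset from the query along a direction (the "director") by a distance (the "dash"), turning one 1-to-N match into N 1-to-1 matches. Here the directions are random and the step size fixed; in the paper both come from a learned iterative process.

```python
# Hypothetical proxy generation: offset the text embedding along N unit
# directions by a fixed step. Directions and step size are assumptions; the
# paper learns them iteratively.
import numpy as np

def make_proxies(query: np.ndarray, n: int, dash: float = 0.1,
                 seed: int = 0) -> np.ndarray:
    rng = np.random.default_rng(seed)
    directions = rng.standard_normal((n, query.shape[-1]))
    directions /= np.linalg.norm(directions, axis=-1, keepdims=True)  # directors
    return query[None, :] + dash * directions  # N distinct 1-to-1 queries

query = np.random.default_rng(1).standard_normal(512)  # a text embedding
proxies = make_proxies(query, n=4)
print(proxies.shape)  # (4, 512): each proxy is matched against one video
```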
arxiv-667397
2410.06620
Task Coordination and Trajectory Optimization for Multi-Aerial Systems via Signal Temporal Logic: A Wind Turbine Inspection Study
<|reference_start|>Task Coordination and Trajectory Optimization for Multi-Aerial Systems via Signal Temporal Logic: A Wind Turbine Inspection Study: This paper presents a method for task allocation and trajectory generation in cooperative inspection missions using a fleet of multirotor drones, with a focus on wind turbine inspection. The approach generates safe, feasible flight paths that adhere to time-sensitive constraints and vehicle limitations by formulating an optimization problem based on Signal Temporal Logic (STL) specifications. An event-triggered replanning mechanism addresses unexpected events and delays, while a generalized robustness scoring method incorporates user preferences and minimizes task conflicts. The approach is validated through simulations in MATLAB and Gazebo, as well as field experiments in a mock-up scenario.<|reference_end|>
arxiv
@article{silano2024task, title={Task Coordination and Trajectory Optimization for Multi-Aerial Systems via Signal Temporal Logic: A Wind Turbine Inspection Study}, author={Giuseppe Silano, Alvaro Caballero, Davide Liuzza, Luigi Iannelli, Stjepan Bogdan, and Martin Saska}, journal={arXiv preprint arXiv:2410.06620}, year={2024}, archivePrefix={arXiv}, eprint={2410.06620}, primaryClass={cs.RO} }
silano2024task
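The optimization in such STL-based planners rests on quantitative robustness semantics: "always" takes the minimum margin of a predicate over time, "eventually" the maximum, and a positive robustness value certifies satisfaction. The sketch below evaluates both operators on a toy altitude signal; the signal and thresholds are illustrative, not the paper's wind-turbine specification.

```python
# Hypothetical STL robustness evaluation on a sampled altitude signal.
# rho(G (s >= h)) = min_t (s_t - h); rho(F (s >= h)) = max_t (s_t - h).
import numpy as np

altitude = np.array([2.0, 3.5, 4.0, 3.0, 2.5])  # drone altitude samples [m]

def robustness_always(signal: np.ndarray, threshold: float) -> float:
    return float(np.min(signal - threshold))    # worst-case margin

def robustness_eventually(signal: np.ndarray, threshold: float) -> float:
    return float(np.max(signal - threshold))    # best-case margin

print(robustness_always(altitude, 1.5))      # 0.5 > 0: "always above 1.5 m" holds
print(robustness_eventually(altitude, 3.8))  # 0.2 > 0: "eventually above 3.8 m" holds
```

Maximizing such robustness scores, or a generalized version that encodes user preferences as the abstract mentions, is what turns an STL specification into a trajectory optimization objective.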
arxiv-667398
2410.06621
Effective Exploration Based on the Structural Information Principles
<|reference_start|>Effective Exploration Based on the Structural Information Principles: Traditional information theory provides a valuable foundation for Reinforcement Learning, particularly through representation learning and entropy maximization for agent exploration. However, existing methods primarily concentrate on modeling the uncertainty associated with RL's random variables, neglecting the inherent structure within the state and action spaces. In this paper, we propose a novel Structural Information principles-based Effective Exploration framework, namely SI2E. Structural mutual information between two variables is defined to address the single-variable limitation of structural information, and an innovative embedding principle is presented to capture dynamics-relevant state-action representations. SI2E analyzes value differences between state-action pairs under the agent's policy and minimizes structural entropy to derive the hierarchical state-action structure, referred to as the encoding tree. Under this tree structure, value-conditional structural entropy is defined and maximized to design an intrinsic reward mechanism that avoids redundant transitions and promotes enhanced coverage of the state-action space. Theoretical connections are established between SI2E and classical information-theoretic methodologies, highlighting the rationale behind our framework and its advantages. Comprehensive evaluations on the MiniGrid, MetaWorld, and DeepMind Control Suite benchmarks demonstrate that SI2E significantly outperforms state-of-the-art exploration baselines in final performance and sample efficiency, with maximum improvements of 37.63% and 60.25%, respectively.<|reference_end|>
arxiv
@article{zeng2024effective, title={Effective Exploration Based on the Structural Information Principles}, author={Xianghua Zeng, Hao Peng, Angsheng Li}, journal={arXiv preprint arXiv:2410.06621}, year={2024}, archivePrefix={arXiv}, eprint={2410.06621}, primaryClass={cs.LG cs.AI} }
zeng2024effective
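For readers unfamiliar with the quantity SI2E minimizes, the sketch below computes the classical two-level structural entropy of a graph partition (Li and Pan's formulation), where the partition plays the role of a depth-two encoding tree. The toy graph is assumed for illustration; the paper's value-conditional variant is more elaborate.

```python
# Two-level structural entropy of a partitioned graph: a community structure
# that traps random walks (few cut edges) scores lower entropy. Toy example.
import math
from collections import defaultdict

def structural_entropy(edges, partition):
    deg = defaultdict(int)
    for u, v in edges:
        deg[u] += 1
        deg[v] += 1
    two_m = sum(deg.values())
    vol = defaultdict(int)   # total degree inside each module
    cut = defaultdict(int)   # edges with exactly one endpoint in the module
    for u in deg:
        vol[partition[u]] += deg[u]
    for u, v in edges:
        if partition[u] != partition[v]:
            cut[partition[u]] += 1
            cut[partition[v]] += 1
    h = 0.0
    for j, vj in vol.items():
        h -= cut[j] / two_m * math.log2(vj / two_m)        # module-level term
    for u, d in deg.items():
        h -= d / two_m * math.log2(d / vol[partition[u]])  # node-level term
    return h

edges = [(0, 1), (1, 2), (0, 2), (3, 4), (4, 5), (3, 5), (2, 3)]
good = {0: "A", 1: "A", 2: "A", 3: "B", 4: "B", 5: "B"}  # two triangles
bad = {0: "A", 1: "B", 2: "A", 3: "B", 4: "A", 5: "B"}   # arbitrary split
print(structural_entropy(edges, good) < structural_entropy(edges, bad))  # True
```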
arxiv-667399
2410.06625
ETA: Evaluating Then Aligning Safety of Vision Language Models at Inference Time
<|reference_start|>ETA: Evaluating Then Aligning Safety of Vision Language Models at Inference Time: Vision Language Models (VLMs) have become essential backbones for multimodal intelligence, yet significant safety challenges limit their real-world application. While textual inputs are often effectively safeguarded, adversarial visual inputs can easily bypass VLM defense mechanisms. Existing defense methods are either resource-intensive, requiring substantial data and compute, or fail to simultaneously ensure safety and usefulness in responses. To address these limitations, we propose a novel two-phase inference-time alignment framework, Evaluating Then Aligning (ETA): 1) Evaluating input visual contents and output responses to establish a robust safety awareness in multimodal settings, and 2) Aligning unsafe behaviors at both shallow and deep levels by conditioning the VLMs' generative distribution with an interference prefix and performing sentence-level best-of-N to search the most harmless and helpful generation paths. Extensive experiments show that ETA outperforms baseline methods in terms of harmlessness, helpfulness, and efficiency, reducing the unsafe rate by 87.5% in cross-modality attacks and achieving 96.6% win-ties in GPT-4 helpfulness evaluation. The code is publicly available at https://github.com/DripNowhy/ETA.<|reference_end|>
arxiv
@article{ding2024eta:, title={ETA: Evaluating Then Aligning Safety of Vision Language Models at Inference Time}, author={Yi Ding, Bolian Li, Ruqi Zhang}, journal={arXiv preprint arXiv:2410.06625}, year={2024}, archivePrefix={arXiv}, eprint={2410.06625}, primaryClass={cs.CV cs.CL cs.LG} }
ding2024eta:
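A minimal sketch of the sentence-level best-of-N step from the abstract: sample N candidate continuations, score each for harmlessness and helpfulness, and keep the highest-scoring one. The generator and both scorers below are stand-in stubs; the weighting parameter alpha is an assumption, not the paper's formulation.

```python
# Hypothetical best-of-N alignment step: the generator and reward models are
# placeholder stubs; only the selection logic is illustrated.
import random

def generate_candidates(prompt: str, n: int) -> list[str]:
    return [f"{prompt} -> candidate {i}" for i in range(n)]  # stub generator

def harmlessness(text: str) -> float:
    return random.random()  # stand-in for a safety evaluator

def helpfulness(text: str) -> float:
    return random.random()  # stand-in for a helpfulness evaluator

def best_of_n(prompt: str, n: int = 8, alpha: float = 0.5) -> str:
    candidates = generate_candidates(prompt, n)
    # alpha trades off safety against utility when ranking candidates.
    return max(candidates,
               key=lambda c: alpha * harmlessness(c) + (1 - alpha) * helpfulness(c))

random.seed(0)
print(best_of_n("Describe this image"))
```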
arxiv-667400
2410.06626
Open-RGBT: Open-vocabulary RGB-T Zero-shot Semantic Segmentation in Open-world Environments
<|reference_start|>Open-RGBT: Open-vocabulary RGB-T Zero-shot Semantic Segmentation in Open-world Environments: Semantic segmentation is a critical technique for effective scene understanding. Traditional RGB-T semantic segmentation models often struggle to generalize across diverse scenarios due to their reliance on pretrained models and predefined categories. Recent advancements in Visual Language Models (VLMs) have facilitated a shift from closed-set to open-vocabulary semantic segmentation methods. However, these models face challenges in dealing with intricate scenes, primarily due to the heterogeneity between the RGB and thermal modalities. To address this gap, we present Open-RGBT, a novel open-vocabulary RGB-T semantic segmentation model. Specifically, we obtain instance-level detection proposals by incorporating visual prompts to enhance category understanding. Additionally, we employ the CLIP model to assess image-text similarity, which helps enforce semantic consistency and mitigate ambiguities in category identification. Empirical evaluations demonstrate that Open-RGBT achieves superior performance in diverse and challenging real-world scenarios, even in the wild, significantly advancing the field of RGB-T semantic segmentation.<|reference_end|>
arxiv
@article{yu2024open-rgbt:, title={Open-RGBT: Open-vocabulary RGB-T Zero-shot Semantic Segmentation in Open-world Environments}, author={Meng Yu, Luojie Yang, Xunjie He, Yi Yang, Yufeng Yue}, journal={arXiv preprint arXiv:2410.06626}, year={2024}, archivePrefix={arXiv}, eprint={2410.06626}, primaryClass={cs.CV} }
yu2024open-rgbt:
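A minimal sketch of the category-correction step the abstract attributes to CLIP: embed a detection crop and each candidate category name, then relabel the proposal with the most similar category under cosine similarity. Both embedding functions below are deterministic placeholders standing in for CLIP's image and text encoders.

```python
# Hypothetical CLIP-style relabeling: cosine similarity between a crop
# embedding and category-name embeddings picks the corrected label. The
# encoders here are toy placeholders, not CLIP.
import numpy as np

def embed_image(crop: np.ndarray) -> np.ndarray:
    return crop.mean(axis=(0, 1))            # placeholder image encoder

def embed_text(label: str, dim: int = 3) -> np.ndarray:
    rng = np.random.default_rng(sum(map(ord, label)))
    return rng.standard_normal(dim)          # placeholder text encoder

def correct_label(crop: np.ndarray, categories: list[str]) -> str:
    img = embed_image(crop)
    sims = []
    for c in categories:
        txt = embed_text(c)
        sims.append(float(img @ txt) / (np.linalg.norm(img) * np.linalg.norm(txt)))
    return categories[int(np.argmax(sims))]  # highest cosine similarity wins

crop = np.random.default_rng(0).random((32, 32, 3))
print(correct_label(crop, ["person", "car", "bicycle"]))
```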