jpcorb20 committed · Commit 8a52461 (verified) · 1 Parent(s): a94ac47

Update README.md

Files changed (1):
  1. README.md +26 -6
README.md CHANGED
@@ -219,14 +219,34 @@ This project may contain trademarks or logos for projects, products, or services
 
 ## Citation
 
-@article{corbeil2025modular,
-  title={A Modular Approach for Clinical SLMs Driven by Synthetic Data with Pre-Instruction Tuning, Model Merging, and Clinical-Tasks Alignment},
-  author={Corbeil, Jean-Philippe and Dada, Amin and Attendu, Jean-Michel and Abacha, Asma Ben and Sordoni, Alessandro and Caccia, Lucas and Beaulieu, Fran{\c{c}}ois and Lin, Thomas and Kleesiek, Jens and Vozila, Paul},
-  journal={arXiv preprint arXiv:2505.10717},
-  year={2025}
+@inproceedings{corbeil-etal-2025-modular,
+  title = "A Modular Approach for Clinical {SLM}s Driven by Synthetic Data with Pre-Instruction Tuning, Model Merging, and Clinical-Tasks Alignment",
+  author = "Corbeil, Jean-Philippe and
+    Dada, Amin and
+    Attendu, Jean-Michel and
+    Ben Abacha, Asma and
+    Sordoni, Alessandro and
+    Caccia, Lucas and
+    Beaulieu, Francois and
+    Lin, Thomas and
+    Kleesiek, Jens and
+    Vozila, Paul",
+  editor = "Che, Wanxiang and
+    Nabende, Joyce and
+    Shutova, Ekaterina and
+    Pilehvar, Mohammad Taher",
+  booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
+  month = jul,
+  year = "2025",
+  address = "Vienna, Austria",
+  publisher = "Association for Computational Linguistics",
+  url = "https://aclanthology.org/2025.acl-long.950/",
+  doi = "10.18653/v1/2025.acl-long.950",
+  pages = "19352--19374",
+  ISBN = "979-8-89176-251-0",
+  abstract = "High computation costs and latency of large language models such as GPT-4 have limited their deployment in clinical settings. Small language models (SLMs) offer a cost-effective alternative, but their limited capacity requires biomedical domain adaptation, which remains challenging. An additional bottleneck is the unavailability and high sensitivity of clinical data. To address these challenges, we propose a novel framework for adapting SLMs into high-performing clinical models. We introduce the MediPhi collection of 3.8B-parameter SLMs developed with our novel framework: pre-instruction tuning of experts on relevant medical and clinical corpora (PMC, Medical Guideline, MedWiki, etc.), model merging, and clinical-tasks alignment. To cover most clinical tasks, we extended the CLUE benchmark to CLUE+, doubling its size. Our expert models deliver relative improvements on this benchmark over the base model without any task-specific fine-tuning: 64.3{\%} on medical entities, 49.5{\%} on radiology reports, and 44{\%} on ICD-10 coding (outperforming GPT-4-0125 by 14{\%}). We unify the expert models into MediPhi via model merging, preserving gains across benchmarks. Furthermore, we built the MediFlow collection, a synthetic dataset of 2.5 million high-quality instructions on 14 medical NLP tasks, 98 fine-grained document types, and JSON format support. Alignment of MediPhi using supervised fine-tuning and direct preference optimization achieves further gains of 18.9{\%} on average."
 }
 
-
 ## Model Card Authors
 
 Jean-Philippe Corbeil