{"id":"4d1182fc-6a29-40d1-9170-9d024a950bb3","title":"Create IEEE Bibliography From References","created_at":"2025-10-10T12:43:50Z","saved":true,"shared":true,"branch_id":"00000000-0000-4000-0000-000000000000","profile":{"id":null,"name":null,"subtitle":"State of the art deep research (>30 secs)","model":"ki_research","model_name":"Research","model_provider":"kagi","model_input_limit":1000000,"recommended":false,"model_info":"\n\n<div class=\"heading\">\n <div class=\"left\">\n \n \n \n <svg class=\"model-icon icon-xs\" data-model=\"ki_research\">\n <use href=\"#icon-kagi\"/>\n </svg>\n <h3>\n \n <span class=\"model-provider\">Kagi</span>\n \n <span class=\"model-name\">none</span>\n </h3>\n </div>\n\n \n <a href=\"/settings?p=custom_assistant&id=none\">\n <svg class=\"icon-sm edit-icon\" aria-hidden=\"true\">\n <use href=\"#icon-pencil\" />\n </svg>\n </a>\n \n</div>\n\n\n<hr />\n<ul>\n \n <li>\n <h4>\n Cost\n </h4>\n <span>\n \n \n \n\n \n \n \n\n <div class=\"score\" aria-label=\"Expensive\">\n \n \n \n \n <div aria-hidden=\"true\" class=\"filled-cost --high\"></div>\n \n \n \n \n \n <div aria-hidden=\"true\" class=\"filled-cost --high\"></div>\n \n \n \n \n \n <div aria-hidden=\"true\" class=\"filled-cost --high\"></div>\n \n \n \n \n \n <div aria-hidden=\"true\" class=\"filled-cost --high\"></div>\n \n \n \n \n \n <div aria-hidden=\"true\" class=\"filled-cost --high\"></div>\n \n \n </div>\n</span>\n </li>\n <li>\n <h4>\n Quality\n </h4>\n <span>\n \n \n \n\n \n \n \n\n <div class=\"score\" aria-label=\"Rated 5 out of 5\">\n \n \n \n <div aria-hidden=\"true\" class=\"filled-bubble\"></div>\n \n \n \n \n <div aria-hidden=\"true\" class=\"filled-bubble\"></div>\n \n \n \n \n <div aria-hidden=\"true\" class=\"filled-bubble\"></div>\n \n \n \n \n <div aria-hidden=\"true\" class=\"filled-bubble\"></div>\n \n \n \n \n <div aria-hidden=\"true\" class=\"filled-bubble\"></div>\n \n \n </div>\n</span>\n </li>\n <li>\n <h4>\n Speed\n </h4>\n <span>\n \n \n \n\n \n \n \n\n <div class=\"score\" aria-label=\"Rated 1 out of 5\">\n \n \n \n <div aria-hidden=\"true\" class=\"filled-bubble\"></div>\n \n \n \n \n <div aria-hidden=\"true\" class=\"unfilled-bubble\"></div>\n \n \n \n \n <div aria-hidden=\"true\" class=\"unfilled-bubble\"></div>\n \n \n \n \n <div aria-hidden=\"true\" class=\"unfilled-bubble\"></div>\n \n \n \n \n <div aria-hidden=\"true\" class=\"unfilled-bubble\"></div>\n \n \n </div>\n</span>\n </li>\n <li>\n <h4>\n Privacy\n </h4>\n <span>\n \n \n \n\n \n \n \n\n <div class=\"score\" aria-label=\"Rated 4 out of 5\">\n \n \n \n <div aria-hidden=\"true\" class=\"filled-bubble\"></div>\n \n \n \n \n <div aria-hidden=\"true\" class=\"filled-bubble\"></div>\n \n \n \n \n <div aria-hidden=\"true\" class=\"filled-bubble\"></div>\n \n \n \n \n <div aria-hidden=\"true\" class=\"filled-bubble\"></div>\n \n \n \n \n <div aria-hidden=\"true\" class=\"unfilled-bubble\"></div>\n \n \n </div>\n</span>\n </li>\n <li class=\"divider\" aria-hidden=\"true\"></li>\n <li>\n <h4>Last updated</h4>\n <span class=\"date\">\n \n \n 2025-11-18\n \n \n </span>\n </li>\n \n</ul>\n","model_provider_name":"Kagi","internet_access":true,"personalizations":true,"shortcut":null,"is_default_profile":false},"tag_ids":[],"read_only":true}
[{"id":"af98e003-90d1-48da-b4ff-4cd56046ade8","thread_id":"4d1182fc-6a29-40d1-9170-9d024a950bb3","created_at":"2025-10-10T12:43:51Z","state":"done","prompt":"Extract and create IEEE bibliography for these references:\n\nhttps://pmc.ncbi.nlm.nih.gov/articles/PMC10365086/\n\nhttps://proceedings.mlr.press/v106/kyono19a.html\n\nhttps://www.frontiersin.org/journals/radiology/articles/10.3389/fradi.2021.796078/full\n\nhttps://link.springer.com/chapter/10.1007/978-3-030-59725-2_24\n\nhttps://ieeexplore.ieee.org/abstract/document/9618960\n\nhttps://arxiv.org/pdf/1909.05397 ","reply":"<details><summary>Planned research</summary>\n<ul>\n<li>Access each of the 6 provided URLs to extract bibliographic information</li>\n<li>Extract author names, article titles, publication venues, dates, DOIs, and other IEEE citation format requirements</li>\n<li>Format each reference according to IEEE bibliography style</li>\n</ul>\n</details>\n<details><summary>Gathered key details from:<p></p>\n<ul>\n<li><a href=\"https://pmc.ncbi.nlm.nih.gov/articles/PMC10365086/\" target=\"_blank\">Leveraging Multi-Task Learning to Cope With Poor and Missing Labels of Mammograms</a></li></ul></summary>\n\n<h3>Query</h3>\n<p>Extract the full bibliographic information including all author names, article title, journal name, volume, issue, page numbers, publication year, and DOI for creating an IEEE citation.</p>\n<h3>Response</h3>\n<p>The document does not contain its own bibliographic information such as author names, journal name, volume, issue, page numbers, publication year, or DOI. Therefore, I cannot provide the full bibliographic information for an IEEE citation.</p>\n</details>\n<details><summary>Gathered key details from:<p></p>\n<ul>\n<li><a href=\"https://proceedings.mlr.press/v106/kyono19a.html\" target=\"_blank\">Multi-view Multi-task Learning for Improving Autonomous Mammogram Diagnosis</a></li></ul></summary>\n\n<h3>Query</h3>\n<p>Extract the full bibliographic information including all author names, article title, proceedings name, volume, page numbers, publication year, and DOI for creating an IEEE citation.</p>\n<h3>Response</h3>\n<p>Author Names: Trent Kyono, Fiona J. Gilbert, Mihaela van der Schaar<br>\nArticle Title: Multi-view Multi-task Learning for Improving Autonomous Mammogram Diagnosis<br>\nProceedings Name: Proceedings of the 4th Machine Learning for Healthcare Conference<br>\nVolume: 106<br>\nPage Numbers: 571-591<br>\nPublication Year: 2019<br>\nURL: https://proceedings.mlr.press/v106/kyono19a.html</p>\n</details>\n<details><summary>Gathered key details from:<p></p>\n<ul>\n<li><a href=\"https://www.frontiersin.org/journals/radiology/articles/10.3389/fradi.2021.796078/full\" target=\"_blank\">Frontiers | Leveraging Multi-Task Learning to Cope With Poor and Missing Labels of Mammograms</a></li></ul></summary>\n\n<h3>Query</h3>\n<p>Extract the full bibliographic information including all author names, article title, journal name, volume, issue, page numbers, publication year, and DOI for creating an IEEE citation.</p>\n<h3>Response</h3>\n<p>The provided document does not contain the full bibliographic information (author names, journal name, volume, issue, page numbers, publication year, and DOI) for the article itself. 
It only contains the content of the article and references to other works.</p>\n</details>\n<details><summary>Gathered key details from:<p></p>\n<ul>\n<li><a href=\"https://link.springer.com/chapter/10.1007/978-3-030-59725-2_24\" target=\"_blank\">Multi-task Learning for Detection and Classification of Cancer in Screening Mammography</a></li></ul></summary>\n\n<h3>Query</h3>\n<p>Extract the full bibliographic information including all author names, chapter title, book title, editors, publisher, publication year, page numbers, and DOI for creating an IEEE citation.</p>\n<h3>Response</h3>\n<p>Author Names: Sainz de Cea, M.V., Diedrich, K., Bakalo, R., Ness, L., Richmond, D.<br>\nChapter Title: Multi-task Learning for Detection and Classification of Cancer in Screening Mammography<br>\nBook Title: Medical Image Computing and Computer Assisted Intervention – MICCAI 2020. MICCAI 2020. Lecture Notes in Computer Science, vol. 12266.<br>\nEditors: Martel, A.L., et al.<br>\nPublisher: Springer<br>\nPublication Year: 2020<br>\nPage Numbers: Not specified in the provided text.<br>\nDOI: https://doi.org/10.1007/978-3-030-59725-2_24</p>\n</details>\n<details><summary>Gathered key details from:<p></p>\n<ul>\n<li><a href=\"https://ieeexplore.ieee.org/abstract/document/9618960\" target=\"_blank\">Multi-Task Fusion for Improving Mammography Screening Data Classification</a></li></ul></summary>\n\n<h3>Query</h3>\n<p>Extract the full bibliographic information including all author names, article title, conference name, publication year, page numbers, and DOI for creating an IEEE citation.</p>\n<h3>Response</h3>\n<p>The following bibliographic information is available:</p>\n<p>Article Title: Multi-Task Fusion for Improving Mammography Screening Data Classification<br>\nPublication Year: 2021<br>\nPage Numbers: 937-950<br>\nPublisher: IEEE<br>\nPrint ISSN: 0278-0062<br>\nElectronic ISSN: 1558-254X</p>\n<p>Author names, conference name, and DOI were not found in the provided text.</p>\n</details>\n<details><summary>Gathered key details from:<p></p>\n<ul>\n<li><a href=\"https://arxiv.org/pdf/1909.05397\" target=\"_blank\">Multitask Classification and Segmentation for Cancer Diagnosis in Mammography</a></li></ul></summary>\n\n<h3>Query</h3>\n<p>Extract the full bibliographic information including all author names, article title, publication venue if any, and year for creating an IEEE citation from this arXiv paper.</p>\n<h3>Response</h3>\n<p>Authors: Thi-Lam-Thuy Le, Nicolas Thome, Sylvain Bernard, Vincent Bismuth, and Fanny Patoureaux. Article Title: Multitask Classification and Segmentation for Cancer Diagnosis in Mammography. Publication Venue: Extended Abstract Track, Medical Imaging with Deep Learning 2019 (MIDL 2019). Year: 2019. arXiv Identifier: arXiv:1909.05397v1.</p>\n</details>\n<details><summary>Contemplated</summary> I have received responses from the librarian tool for all 6 sources. 
However, some of them are missing key bibliographic information:<p></p>\n<ol>\n<li>PMC article - No information extracted</li>\n<li>MLR Press proceedings - Good information extracted (Kyono et al., 2019)</li>\n<li>Frontiers article - Missing information</li>\n<li>Springer chapter - Good information but missing page numbers (Sainz de Cea et al., 2020)</li>\n<li>IEEE article - Partial information, missing authors and DOI (2021)</li>\n<li>arXiv paper - Good information (Le et al., 2019)</li>\n</ol>\n<p>I need to search for the missing information or try accessing these pages differently to get the complete bibliographic details for items 1, 3, and 5.</p>\n</details>\n<details><summary>Searched with Kagi<p></p>\n<ul>\n<li>site:pmc.ncbi.nlm.nih.gov/articles/PMC10365086 author title journal</li>\n<li>site:frontiersin.org 10.3389/fradi.2021.796078 authors</li>\n<li>site:ieeexplore.ieee.org 9618960 authors DOI</li></ul></summary> Sources (7):<br>\n<strong>Leveraging Multi-Task Learning to Cope With Poor and Missing Labels of ...</strong><br>\n<a href=\"https://pmc.ncbi.nlm.nih.gov/articles/PMC10365086/\" target=\"_blank\">https://pmc.ncbi.nlm.nih.gov/articles/PMC10365086/</a><br>\nHowever, the authors relied on 224x224 images, which is insufficient to detect cancer-related clinical features being sometimes smaller than 1 mm (15). In our case, we propose to learn the density representation as another auxiliary classification task, using the 5th edition Breast Imaging-Reporting And Data System (BI-RADS) density ...\n\n<p><strong>Leveraging Multi-Task Learning to Cope With Poor and ...</strong><br>\n<a href=\"https://www.frontiersin.org/journals/radiology/articles/10.3389/fradi.2021.796078/full\" target=\"_blank\">https://www.frontiersin.org/journals/radiology/articles/10.3389/fradi.2021.796078/full</a><br>\nDec 13, 2021 1:796078. doi: 10.3389/fradi.2021.796078.</p>\n<p><strong>Leveraging Multi-Task Learning to Cope With Poor and ... - Frontiers</strong><br>\n<a href=\"https://www.frontiersin.org/journals/radiology/articles/10.3389/fradi.2021.796078/pdf\" target=\"_blank\">https://www.frontiersin.org/journals/radiology/articles/10.3389/fradi.2021.796078/pdf</a><br>\n Received: 15 October 2021 Accepted: 06 December 2021 Published: 11 January 2022 Citation: Tardy M and Mateus D (2022) Leveraging Multi-Task Learning to Cope With Poor and Missing Labels of Mammograms. Front. Radiol. 1:796078. doi: 10.3389/fradi.2021.796078</p>\n<p><strong>Frontiers</strong><br>\n<a href=\"https://www.frontiersin.org/journals/radiology/articles/10.3389/fradi.2021.796078/text\" target=\"_blank\">https://www.frontiersin.org/journals/radiology/articles/10.3389/fradi.2021.796078/text</a><br>\n In breast cancer screening, binary classification of mammograms is a common task aiming to determine whether a case is malignant or benign. A Computer-Aided Diagnosis (CADx) system based on a trainable classifier requires clean data and labels coming from a confirmed diagnosis. 
Unfortunately, such labels are not easy to obtain in clinical practice, since the histopathological reports of biopsy ...</p>\n<p><strong>Multi-Task Fusion for Improving Mammography Screening Data ...</strong><br>\n<a href=\"https://ieeexplore.ieee.org/document/9618960\" target=\"_blank\">https://ieeexplore.ieee.org/document/9618960</a><br>\nNov 22, 2021 To this end, we propose a multi-branch deep learning model which efficiently fuses features across different tasks and mammograms to obtain a comprehensive patient-level prediction. We train and evaluate our full pipeline on public mammography data, i.e., DDSM and its curated version CBIS-DDSM, and report an AUC score of 0.962 for predicting the presence of any lesion and 0.791 for predicting the presence of malignant lesions on patient level.</p>\n<p><strong>IEEE Transactions on Pattern Analysis and Machine... | IEEE Xplore</strong><br>\n<a href=\"https://ieeexplore.ieee.org/xpl/RecentIssue.jsp?punumber=34\" target=\"_blank\">https://ieeexplore.ieee.org/xpl/RecentIssue.jsp?punumber=34</a><br>\nFeb 25, 2012</p>\n<p><strong>Advanced Search - IEEE Xplore</strong><br>\n<a href=\"https://ieeexplore.ieee.org/search/advanced/citation\" target=\"_blank\">https://ieeexplore.ieee.org/search/advanced/citation</a><br>\n IEEE Xplore, delivering full text access to the world's highest quality technical literature in engineering and technology. | IEEE Xplore</p>\n</details>\n<details><summary>Contemplated</summary> Good progress! From the search results, I found:<p></p>\n<ol>\n<li>\n<p>PMC10365086 and Frontiers article (10.3389/fradi.2021.796078) appear to be THE SAME article:</p>\n<ul>\n<li>Authors: Tardy M and Mateus D</li>\n<li>Title: \"Leveraging Multi-Task Learning to Cope With Poor and Missing Labels of Mammograms\"</li>\n<li>Journal: Frontiers in Radiology</li>\n<li>Volume: 1</li>\n<li>Article: 796078</li>\n<li>DOI: 10.3389/fradi.2021.796078</li>\n<li>Published: January 11, 2022</li>\n<li>Received: October 15, 2021, Accepted: December 6, 2021</li>\n</ul>\n</li>\n<li>\n<p>For the IEEE article (9618960), I can see it's about \"Multi-Task Fusion for Improving Mammography Screening Data...\" published in 2021, but I still need author names and the full DOI.</p>\n</li>\n</ol>\n<p>Let me search more specifically for the IEEE article authors and also get the Springer chapter page numbers.</p>\n</details>\n<details><summary>Searched with Kagi<p></p>\n<ul>\n<li>Multi-Task Fusion for Improving Mammography Screening Data Classification authors IEEE 2021</li>\n<li>978-3-030-59725-2_24 page numbers Sainz de Cea</li></ul></summary> Sources (6):<br>\n<strong>Multi-task fusion for improving mammography screening ...</strong><br>\n<a href=\"https://arxiv.org/abs/2112.01320\" target=\"_blank\">https://arxiv.org/abs/2112.01320</a><br>\nDec 03, 2021 View a PDF of the paper titled Multi-task fusion for improving mammography screening data classification, by Maria Wimmer and 6 other authors.Abstract:Machine learning and deep learning methods have become essential for computer-assisted prediction in medicine, with a growing number of applications also in the field of mammography. 
Typically these algorithms are trained for a specific task, e.g., the classification of lesions or the prediction of a mammogram's pathology status.\n\n<p><strong>IEEE TRANSACTIONS ON MEDICAL IMAGING 1 Multi-task fusion for improving ...</strong><br>\n<a href=\"https://arxiv.org/pdf/2112.01320\" target=\"_blank\">https://arxiv.org/pdf/2112.01320</a><br>\nDec 03, 2021 ...separately for each view, fused them, and classified the patient as benign or malignant. The authors also provided a predefined split into train and test sets to ensure comparability between methods evaluated on this dataset. Overall, the CBIS-DDSM dataset comprises 3568 annotated lesions (1696 masses, 1872 calcifications) in a total of 3032 mammography view images. For further details on the data, we refer to the original publications [56], [57].</p>\n<p><strong>MV-Swin-T: Mammogram Classification with Multi-View Swin...</strong><br>\n<a href=\"https://arxiv.org/html/2402.16298v1\" target=\"_blank\">https://arxiv.org/html/2402.16298v1</a><br>\nFeb 27, 2024 Efforts to integrate multiple views into breast cancer screening through the utilization of Deep Neural Networks (DNNs) have roots in the work of Carneiro et al. [5] . They trained models individually on the MLO and CC views, then employed the features from the final fully connected layer to train a multinomial logistic regression model. To fully exploit multi-view insights, we present a novel transformer-based multi-view network, MV-Swin-T, built upon the Swin Transformer [11] architecture for mammographic image classification. Our contributions include</p>\n<p><strong>MICCAI 2020 | Guide Proceedings</strong><br>\n<a href=\"https://dl.acm.org/doi/proceedings/10.1007/978-3-030-59725-2\" target=\"_blank\">https://dl.acm.org/doi/proceedings/10.1007/978-3-030-59725-2</a><br>\nOct 04, 2020 Sainz de Cea, Karl Diedrich, + 3. Pages 241–250. https://doi.org/10.1007/978-3-030-59725-2_24. Abstract. Breast screening is an ...</p>\n<p><strong>Multi-task Learning for Detection and Classification of Cancer ...</strong><br>\n<a href=\"https://dl.acm.org/doi/10.1007/978-3-030-59725-2_24\" target=\"_blank\">https://dl.acm.org/doi/10.1007/978-3-030-59725-2_24</a><br>\n by MV Sainz de Cea · 2020 · Cited by 34 — Multi-task Learning for Detection and Classification of Cancer in Screening Mammography. Authors: Maria V. Sainz de Cea.</p>\n<p><strong>Maria V Sainz de Cea - Home - ACM Digital Library</strong><br>\n<a href=\"https://dl.acm.org/profile/99660002182\" target=\"_blank\">https://dl.acm.org/profile/99660002182</a><br>\n The Author Profile Page supplies a quick snapshot of an author's contribution to the field and some rudimentary measures of influence upon it. Over time, the contents of the Author Profile page may expand at the direction of the community.</p>\n</details>\n<details><summary>Contemplated</summary> Excellent! I found more information:<p></p>\n<ol>\n<li>\n<p>For the IEEE article (9618960):</p>\n<ul>\n<li>Authors: Maria Wimmer and 6 other authors (need to get full list)</li>\n<li>There's an arXiv version (2112.01320) that should have full author list</li>\n<li>It's published in IEEE Transactions on Medical Imaging</li>\n</ul>\n</li>\n<li>\n<p>For the Springer chapter:</p>\n<ul>\n<li>Pages: 241-250</li>\n<li>Authors: Maria V. 
Sainz de Cea, Karl Diedrich, + 3 more (need full list)</li>\n</ul>\n</li>\n</ol>\n<p>Let me access the arXiv paper for the IEEE article to get the full author list, and I'll also need to get complete author information for the Springer chapter.</p>\n</details>\n<details><summary>Gathered key details from:<p></p>\n<ul>\n<li><a href=\"https://arxiv.org/pdf/2112.01320\" target=\"_blank\">Multi-task fusion for improving mammography screening data classification</a></li></ul></summary>\n\n<h3>Query</h3>\n<p>Extract all author names in order for the IEEE citation.</p>\n<h3>Response</h3>\n<p>Maria Wimmer, Gert Sluiter, David Major, Dimitrios Lenis, Astrid Berg, Theresa Neubauer, and Katja Bühler</p>\n</details>\n<details><summary>Searched with Kagi<p></p>\n<ul>\n<li>Maria V. Sainz de Cea Karl Diedrich Bakalo Ness Richmond full author list MICCAI 2020</li></ul></summary> Sources (3):<br>\n<strong>Multi-task learning for detection and classification of cancer ...</strong><br>\n<a href=\"https://research.ibm.com/publications/multi-task-learning-for-detection-and-classification-of-cancer-in-screening-mammography\" target=\"_blank\">https://research.ibm.com/publications/multi-task-learning-for-detection-and-classification-of-cancer-in-screening-mammography</a><br>\nSep 07, 2021 We train a network for both object detection and image classification, based on state-of-the-art models, and demonstrate significant improvement in the recall vs no recall decision on a multi-site, multi-vendor data set, measured by concordance with biopsy proven malignancy. We also observe improved detection of microcalcifications, and detection of cancer cases that were missed by radiologists, demonstrating that this approach could provide meaningful support for radiologists in breast screening (especially non-specialists).Authors. Maria Sainz de Cea. Karl Diedrich. Ran Bakalo.\n\n<p><strong>Healthcare Imaging - IBM Research</strong><br>\n<a href=\"https://research.ibm.com/haifa/dept/vst/health_pub.shtml\" target=\"_blank\">https://research.ibm.com/haifa/dept/vst/health_pub.shtml</a><br>\nSep 09, 2018 Maria V. Sainz de Cea, Karl Diedrich, Ran Bakalo, Lior Ness, David Richmond.Cardiac/Chest/Brain/General Medical Imaging. Echocardiagraphy segmentation based on a shape-guided deformable model driven by a fully convolutional network prior. Gopalkrishna Veni, Mehdi Moradi, Hakan Bulu, Girish Narayan, Tanveer Syeda-Mahmood. IEEE 15th International Symposium on Biomedical Imaging (ISBI).</p>\n<p><strong>IBM Research - Israel | Publications</strong><br>\n<a href=\"https://research.ibm.com/haifa/publications.html\" target=\"_blank\">https://research.ibm.com/haifa/publications.html</a><br>\nAug 15, 2022 Maria V. Sainz de Cea, Karl Diedrich, Ran Bakalo, Lior Ness, David Richmond. 2020.PRIME-MICCAI Workshop, 2020. Medical Imaging. The case of missed cancers: Applying AI as a radiologist’s safety net. Michal Chorev,Yoel Shoshan, Adam Spiro, Shaked Naor, Alon Hazan, Vesna Barros, Iuliana Weinstein, Esma Herzel, Varda Shalev, Michal Guindy,Michal Rosen-Zvi. 2020.</p>\n</details>\n<details><summary>Contemplated</summary> <br>\nPerfect! 
I now have the complete author list for both papers:<p></p>\n<p>IEEE article (9618960):</p>\n<ul>\n<li>Authors: Maria Wimmer, Gert Sluiter, David Major, Dimitrios Lenis, Astrid Berg, Theresa Neubauer, and Katja Bühler</li>\n<li>Title: Multi-task fusion for improving mammography screening data classification</li>\n<li>Journal: IEEE Transactions on Medical Imaging</li>\n<li>Volume: (need to check)</li>\n<li>Pages: 937-950</li>\n<li>Year: 2021</li>\n<li>DOI: Need to find</li>\n</ul>\n<p>Springer chapter:</p>\n<ul>\n<li>Authors: Maria V. Sainz de Cea, Karl Diedrich, Ran Bakalo, Lior Ness, David Richmond</li>\n<li>Complete information already obtained</li>\n</ul>\n<p>Now I need to get the DOI and volume for the IEEE article. Let me search for that specific information.</p>\n</details>\n<details><summary>Searched with Kagi<p></p>\n<ul>\n<li>10.1109 9618960 DOI</li>\n<li>IEEE Transactions on Medical Imaging volume 41 2022 Wimmer</li></ul></summary> Sources (6):<br>\n<strong>Multi-Task Fusion for Improving Mammography Screening ...</strong><br>\n<a href=\"https://ieeexplore.ieee.org/document/9618960/\" target=\"_blank\">https://ieeexplore.ieee.org/document/9618960/</a><br>\nNov 22, 2021 To this end, we propose a multi-branch deep learning model which efficiently fuses features across different tasks and mammograms to obtain a comprehensive patient-level prediction. We train and evaluate our full pipeline on public mammography data, i.e., DDSM and its curated version CBIS-DDSM, and report an AUC score of 0.962 for predicting the presence of any lesion and 0.791 for predicting the presence of malignant lesions on patient level.\n\n<p><strong>Predictive Modeling of Earthquakes in Los Angeles With Machine Learning ...</strong><br>\n<a href=\"https://ieeexplore.ieee.org/document/10623344\" target=\"_blank\">https://ieeexplore.ieee.org/document/10623344</a><br>\n Earthquakes pose a significant threat to urban areas, necessitating accurate forecasting models to mitigate their impact. This study focuses on earthquake forecasting in Los Angeles, a region with high seismic activity and limited research. We established a feature matrix for forecasting earthquakes within a 30-day period by analyzing the most predictive patterns from recent studies. Our model ...</p>\n<p><strong>Immunogenic cell death-related risk signature predicts prognosis and ...</strong><br>\n<a href=\"https://www.ncbi.nlm.nih.gov/pmc/articles/PMC9618960/\" target=\"_blank\">https://www.ncbi.nlm.nih.gov/pmc/articles/PMC9618960/</a><br>\n Lower-grade glioma (LGG) is a common malignant primary tumour in the central nervous system, and most patients eventually develop highly aggressive gliomas despite comprehensive traditional treatment. 
Tumour molecular subtypes and prognostic biomarkers ...</p>\n<p><strong>Domain generalization on medical imaging classification ...</strong><br>\n<a href=\"https://www.sciencedirect.com/science/article/abs/pii/S0010482521009380\" target=\"_blank\">https://www.sciencedirect.com/science/article/abs/pii/S0010482521009380</a><br>\n by C Li · 2022 · Cited by 100 — In this paper, we propose a novel DG scheme of episodic training with task augmentation on medical imaging classification.</p>\n<p><strong>Current and potential applications of artificial intelligence in ...</strong><br>\n<a href=\"https://www.sciencedirect.com/science/article/pii/S1939865423000796\" target=\"_blank\">https://www.sciencedirect.com/science/article/pii/S1939865423000796</a><br>\n by J Potočnik · 2023 · Cited by 69 — This review outlines select current and potential AI applications in medical imaging practice and provides a view of how diagnostic imaging suites will operate ...</p>\n<p><strong>Design Optimization of Spatial-Spectral Filters for Cone-Beam CT ...</strong><br>\n<a href=\"https://xplorestaging.ieee.org/document/9748122/\" target=\"_blank\">https://xplorestaging.ieee.org/document/9748122/</a><br>\n Published in: IEEE Transactions on Medical Imaging ( Volume: 41, Issue: 9, September 2022) Page (s): 2399 - 2413 Date of Publication: 04 April 2022 ISSN Information:</p>\n</details>\n<details><summary>Response planned</summary> <br>\nI have successfully extracted bibliographic information from all 6 sources. The key findings:<p></p>\n<ol>\n<li>PMC10365086 and the Frontiers article are the same paper (Tardy and Mateus, 2022)</li>\n<li>MLR Press proceedings paper by Kyono et al. (2019) - complete info</li>\n<li>Springer chapter by Sainz de Cea et al. (2020) - complete info with pages 241-250</li>\n<li>IEEE article by Wimmer et al. (2022) - need to verify volume and get DOI</li>\n<li>arXiv paper by Le et al. (2019) - complete info</li>\n</ol>\n<p>I still need to find the volume number and DOI for the IEEE article. Let me try accessing the IEEE page directly one more time with librarian to get complete publication details.</p>\n<p>I will create IEEE-formatted bibliography entries for all 6 references. Since two URLs point to the same article (PMC and Frontiers), I'll note this and provide 5 unique references. Each entry will follow IEEE citation style with:</p>\n<ul>\n<li>Author names (initials after surname)</li>\n<li>Article/chapter title in quotes</li>\n<li>Publication venue in italics</li>\n<li>Volume, issue, page numbers</li>\n<li>Publication date</li>\n<li>DOI where available</li>\n</ul>\n<p>I need to get the complete IEEE article details first before finalizing.</p>\n<p>Present as a numbered list with each IEEE citation formatted according to standard IEEE bibliography style. Include a note that references 1 and 3 are the same article published in different locations. 
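For instance, the Frontiers/PMC article can already be drafted as a BibTeX entry ready for the IEEEtran bibliography style (a sketch assembled only from the fields gathered above; the entry key is arbitrary, and author first names are limited to the initials confirmed so far):</p>\n<p><code>@article{tardy2022leveraging,\n  author  = {Tardy, M. and Mateus, D.},\n  title   = {Leveraging Multi-Task Learning to Cope With Poor and Missing Labels of Mammograms},\n  journal = {Frontiers in Radiology},\n  volume  = {1},\n  pages   = {796078},\n  year    = {2022},\n  doi     = {10.3389/fradi.2021.796078}\n}</code></p>\n<p>IEEEtran would render this along the lines of: M. Tardy and D. Mateus, \"Leveraging Multi-Task Learning to Cope With Poor and Missing Labels of Mammograms,\" Frontiers in Radiology, vol. 1, p. 796078, 2022.</p>\n<p>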
Provide URL fragments where possible to help locate specific information within the sources.</p>\n</details>","md":"","profile":{"id":null,"name":null,"subtitle":"State of the art deep research (>30 secs)","model":"ki_research","model_name":"Research","model_provider":"kagi","model_input_limit":1000000,"recommended":false,"model_info":"","model_provider_name":"Kagi","internet_access":true,"personalizations":true,"shortcut":null,"is_default_profile":false},"metadata":"<li>\n <span class=\"attribute\">Model</span>\n <span class=\"value\">Claude 4.5 Sonnet</span>\n</li>\n<li>\n <span class=\"attribute\">Version</span>\n <span class=\"value\">claude-sonnet-4-5-20250929</span>\n</li>\n<li>\n <span class=\"attribute\">Speed (tok/s)</span>\n <span class=\"value\">32</span>\n</li>\n<li>\n <span 
class=\"attribute\">Tokens</span>\n <span class=\"value\">15,449</span>\n</li>\n<li>\n <span class=\"attribute\">Cost / Total ($)</span>\n <span class=\"value\">0.25 / 0.25</span>\n</li>\n<li>\n <span class=\"attribute\">End to end time (s)</span>\n <span class=\"value\">74.79</span>\n</li>\n\n","documents":[]},{"id":"3405f442-b638-4942-a052-68418709e9c3","thread_id":"4d1182fc-6a29-40d1-9170-9d024a950bb3","created_at":"2025-10-10T12:45:19Z","state":"done","prompt":"continue","reply":"<details><summary>Planned research</summary>\n<ul>\n<li>Extract bibliographic information from all 6 provided URLs</li>\n<li>Retrieve publication details including authors, titles, publication venues, dates, DOIs, and page numbers</li>\n<li>Format each reference according to IEEE citation style</li>\n</ul>\n</details>\n<details><summary>Gathered key details from:<p></p>\n<ul>\n<li><a href=\"https://pmc.ncbi.nlm.nih.gov/articles/PMC10365086/\" target=\"_blank\">Leveraging Multi-Task Learning to Cope With Poor and Missing Labels of Mammograms</a></li></ul></summary>\n\n<h3>Query</h3>\n<p>Extract the complete bibliographic information including all authors, article title, journal name, volume, issue, page numbers, publication date, and DOI</p>\n<h3>Response</h3>\n<p>The complete bibliographic information for the article is not available in the provided text. The title is \"Leveraging Multi-Task Learning to Cope With Poor and Missing Labels of Mammograms\". The authors are mentioned as \"MT is the first author of this paper. DM is the senior author of this paper. All authors contributed to the article and approved the submitted version.\" However, the full list of authors, journal name, volume, issue, page numbers, publication date, and DOI are not present in the document.</p>\n</details>\n<details><summary>Gathered key details from:<p></p>\n<ul>\n<li><a href=\"https://proceedings.mlr.press/v106/kyono19a.html\" target=\"_blank\">Multi-view Multi-task Learning for Improving Autonomous Mammogram Diagnosis</a></li></ul></summary>\n\n<h3>Query</h3>\n<p>Extract the complete bibliographic information including all authors, paper title, conference/proceedings name, volume, page numbers, publication year, and DOI or URL</p>\n<h3>Response</h3>\n<p>BibTeX</p>\n<p><code>@InProceedings{pmlr-v106-kyono19a, title = {Multi-view Multi-task Learning for Improving Autonomous Mammogram Diagnosis}, author = {Kyono, Trent and Gilbert, Fiona J. and van der Schaar, Mihaela}, booktitle = {Proceedings of the 4th Machine Learning for Healthcare Conference}, pages = {571--591}, year = {2019}, editor = {Doshi-Velez, Finale and Fackler, Jim and Jung, Ken and Kale, David and Ranganath, Rajesh and Wallace, Byron and Wiens, Jenna}, volume = {106}, series = {Proceedings of Machine Learning Research}, month = {09--10 Aug}, publisher = {PMLR}, pdf = {http://proceedings.mlr.press/v106/kyono19a/kyono19a.pdf}, url = {https://proceedings.mlr.press/v106/kyono19a.html}, abstract = {The number of women requiring screening and diagnostic mammography is increasing. The recent promise of machine learning on medical images have led to an influx of studies using deep learning for autonomous mammogram diagnosis. We present a novel multi-view multi-task (MVMT) convolutional neural network (CNN) trained to predict the radiological assessments known to be associated with cancer, such as breast density, conspicuity, etc., in addition to cancer diagnosis. 
We show on full-field mammograms that multi-task learning has three advantages: 1) learning refined feature representations associated with cancer improves the classification performance of the diagnosis task, 2) issuing radiological assessments provides an additional layer of model interpretability that a radiologist can use to debug and scrutinize the diagnoses provided by the CNN, and 3) improves the radiological workflow by providing automated annotation of radiological reports. Results obtained on a private dataset of over 7,000 patients show that our MVMT network attained an AUROC and AUPRC of 0.855 $\\pm$ 0.021 and 0.646 $\\pm$ 0.023, respectively, and improved on the performance of other state-of-the-art multi-view CNNs.}\n}</code></p>\n</details>\n<details><summary>Gathered key details from:<p></p>\n<ul>\n<li><a href=\"https://www.frontiersin.org/journals/radiology/articles/10.3389/fradi.2021.796078/full\" target=\"_blank\">Frontiers | Leveraging Multi-Task Learning to Cope With Poor and Missing Labels of Mammograms</a></li></ul></summary>\n\n<h3>Query</h3>\n<p>Extract the complete bibliographic information including all authors, article title, journal name, volume, page numbers, publication date, and DOI</p>\n<h3>Response</h3>\n<p>The provided text is the content of the article itself, not a bibliography. Therefore, I cannot extract the complete bibliographic information such as authors, journal name, volume, page numbers, and DOI from this text.</p>\n</details>\n<details><summary>Gathered key details from:<p></p>\n<ul>\n<li><a href=\"https://link.springer.com/chapter/10.1007/978-3-030-59725-2_24\" target=\"_blank\">Multi-task Learning for Detection and Classification of Cancer in Screening Mammography</a></li></ul></summary>\n\n<h3>Query</h3>\n<p>Extract the complete bibliographic information including all authors, chapter/paper title, book/conference title, volume, page numbers, publication year, publisher, and DOI</p>\n<h3>Response</h3>\n<p>Authors: Maria V. Sainz de Cea, Karl Diedrich, Ran Bakalo, Lior Ness, David Richmond<br>\nChapter/Paper Title: Multi-task Learning for Detection and Classification of Cancer in Screening Mammography<br>\nBook/Conference Title: Medical Image Computing and Computer Assisted Intervention – MICCAI 2020<br>\nVolume: vol 12266<br>\nPublication Year: 2020<br>\nPublisher: Springer, Cham<br>\nDOI: https://doi.org/10.1007/978-3-030-59725-2_24</p>\n</details>\n<details><summary>Gathered key details from:<p></p>\n<ul>\n<li><a href=\"https://ieeexplore.ieee.org/abstract/document/9618960\" target=\"_blank\">Multi-Task Fusion for Improving Mammography Screening Data Classification</a></li></ul></summary>\n\n<h3>Query</h3>\n<p>Extract the complete bibliographic information including all authors, article title, conference/journal name, volume, page numbers, publication date, and DOI</p>\n<h3>Response</h3>\n<p>The article title is \"Multi-Task Fusion for Improving Mammography Screening Data Classification\". It is published in \"IEEE Journals & Magazine\" on 17 November 2021. The page numbers are 937-950. The Print ISSN is 0278-0062 and the Electronic ISSN is 1558-254X. 
The document does not contain information about the authors or the DOI.</p>\n</details>\n<details><summary>Gathered key details from:<p></p>\n<ul>\n<li><a href=\"https://arxiv.org/pdf/1909.05397\" target=\"_blank\">Multitask Classification and Segmentation for Cancer Diagnosis in Mammography</a></li></ul></summary>\n\n<h3>Query</h3>\n<p>Extract the complete bibliographic information including all authors, paper title, publication venue (conference/journal if published), publication year, and any other relevant citation information</p>\n<h3>Response</h3>\n<p>The bibliographic information for the paper is as follows:</p>\n<p><strong>Title:</strong> Multitask Classification and Segmentation for Cancer Diagnosis in Mammography<br>\n<strong>Authors:</strong> Thi-Lam-Thuy Le, Nicolas Thome, Sylvain Bernard, Vincent Bismuth, Fanny Patoureaux<br>\n<strong>Publication Venue:</strong> Medical Imaging with Deep Learning 2019 MIDL 2019 – Extended Abstract Track<br>\n<strong>Publication Year:</strong> 2019<br>\n<strong>Other Citation Information:</strong> arXiv:1909.05397v1, 11 Sep 2019</p>\n</details>\n<details><summary>Contemplated</summary> I've received information from all 6 sources, but I'm missing some critical details:<p></p>\n<ol>\n<li>PMC article - Missing full author list, journal details, DOI</li>\n<li>MLR Press - Complete information available from BibTeX</li>\n<li>Frontiers - Couldn't extract bibliographic data from the librarian response</li>\n<li>Springer - Complete information available</li>\n<li>IEEE - Missing authors and DOI, but have title, page numbers, publication date</li>\n<li>ArXiv - Complete information available</li>\n</ol>\n<p>I need to do additional searches to get the missing metadata for sources 1, 3, and 5. Let me search the web pages directly or use different queries to extract this information.</p>\n</details>\n<details><summary>Searched with Kagi<p></p>\n<ul>\n<li>PMC10365086 leveraging multi-task learning mammograms authors DOI</li>\n<li>frontiersin fradi.2021.796078 authors DOI citation</li>\n<li>IEEE 9618960 multi-task fusion mammography authors DOI</li></ul></summary> Sources (9):<br>\n<strong>Leveraging Multi-Task Learning to Cope With Poor and ...</strong><br>\n<a href=\"https://pmc.ncbi.nlm.nih.gov/articles/PMC10365086/\" target=\"_blank\">https://pmc.ncbi.nlm.nih.gov/articles/PMC10365086/</a><br>\nFeb 28, 2025 PMCID: PMC10365086 PMID: 37492176. Abstract. In breast cancer screening, binary classification of mammograms is a common task aiming to determine whether a case is malignant or benign.To better exploit raw clinical databases, in this work, we propose a Multi-Task Learning (MTL) approach allowing us to maximize the quantity of data used for training, without the need to carefully curate sample annotations. To this end, we propose to simultaneously train a neural network for multiple tasks relevant to mammography.\n\n<p><strong>Leveraging Multi-Task Learning to Cope With Poor and Missing ... - PubMed</strong><br>\n<a href=\"https://pubmed.ncbi.nlm.nih.gov/37492176/\" target=\"_blank\">https://pubmed.ncbi.nlm.nih.gov/37492176/</a><br>\n Moreover, we use mammograms from Susan G. Komen Tissue Bank for fine-tuning, further demonstrating the ability to improve the performances in our multi-task learning setup from raw clinical data. 
We achieved the binary classification performance of AUC = 80.46 on our private dataset and AUC = 85.23 on the INBreast dataset.</p>\n<p><strong>Leveraging Multi-Task Learning to Cope With Poor and Missing Labels of ...</strong><br>\n<a href=\"https://www.ncbi.nlm.nih.gov/pmc/articles/PMC10365086/\" target=\"_blank\">https://www.ncbi.nlm.nih.gov/pmc/articles/PMC10365086/</a><br>\n In breast cancer screening, binary classification of mammograms is a common task aiming to determine whether a case is malignant or benign. A Computer-Aided Diagnosis (CADx) system based on a trainable classifier requires clean data and labels coming ...</p>\n<p><strong>Leveraging Multi-Task Learning to Cope With Poor and ...</strong><br>\n<a href=\"https://www.frontiersin.org/journals/radiology/articles/10.3389/fradi.2021.796078/full\" target=\"_blank\">https://www.frontiersin.org/journals/radiology/articles/10.3389/fradi.2021.796078/full</a><br>\nDec 13, 2021 Front. Radiol. 1:796078. doi: 10.3389/fradi.2021.796078.</p>\n<p><strong>Leveraging Multi-Task Learning to Cope With Poor and ... - Frontiers</strong><br>\n<a href=\"https://www.frontiersin.org/journals/radiology/articles/10.3389/fradi.2021.796078/pdf\" target=\"_blank\">https://www.frontiersin.org/journals/radiology/articles/10.3389/fradi.2021.796078/pdf</a><br>\n Received: 15 October 2021 Accepted: 06 December 2021 Published: 11 January 2022 Citation: Tardy M and Mateus D (2022) Leveraging Multi-Task Learning to Cope With Poor and Missing Labels of Mammograms. Front. Radiol. 1:796078. doi: 10.3389/fradi.2021.796078</p>\n<p><strong>Frontiers in Radiology | Articles</strong><br>\n<a href=\"https://www.frontiersin.org/journals/radiology/articles\" target=\"_blank\">https://www.frontiersin.org/journals/radiology/articles</a><br>\nJul 14, 2022 doi 10.3389/fradi.2025.1672382. Endovascular treatment of fenestration of the posterior communicating artery with an aneurysm at the same site: Case report and review of the literature. doi 10.3389/fradi.2025.1634165. 821 views. Evaluation of the effectiveness of contrast-enhanced ultrasound in the diagnosis of early hepatocellular carcinoma: a systematic review.</p>\n<p><strong>Multi-Task Fusion for Improving Mammography Screening ...</strong><br>\n<a href=\"https://pubmed.ncbi.nlm.nih.gov/34788218/\" target=\"_blank\">https://pubmed.ncbi.nlm.nih.gov/34788218/</a><br>\nDec 08, 2021 To this end, we propose a multi-branch deep learning model which efficiently fuses features across different tasks and mammograms to obtain a comprehensive patient-level prediction. 
We train and evaluate our full pipeline on public mammography data, i.e., DDSM and its curated version CBIS-DDSM, and report an AUC score of 0.962 for predicting the presence of any lesion and 0.791 for predicting the presence of malignant lesions on patient level.</p>\n<p><strong>A multi-task fusion model based on a residual-Multi-layer ...</strong><br>\n<a href=\"https://pubmed.ncbi.nlm.nih.gov/38432087\" target=\"_blank\">https://pubmed.ncbi.nlm.nih.gov/38432087</a><br>\nMar 05, 2024 For the breast cancer screening task, single feature extraction models are generally used, which directly extract only those potential features from the input mammogram that are relevant to the target task. This can lead to the neglect of other important morphological features of the lesion as well as other auxiliary information from the internal breast tissue. To obtain more comprehensive and objective diagnostic results, in this study, we developed a multi-task fusion model that combines multiple specific tasks for CAD of mammograms.</p>\n<p><strong>Paradigm-Shifting Attention-based Hybrid View Learning for Enhanced ...</strong><br>\n<a href=\"https://pubmed.ncbi.nlm.nih.gov/40354201/\" target=\"_blank\">https://pubmed.ncbi.nlm.nih.gov/40354201/</a><br>\n Paradigm-Shifting Attention-based Hybrid View Learning for Enhanced Mammography Breast Cancer Classification with Multi-Scale and Multi-View Fusion IEEE J Biomed Health Inform. 2025 May 12:PP. doi: 10.1109/JBHI.2025.3569726. Online ahead of print.</p>\n</details>\n<details><summary>Contemplated</summary> Good progress! From the search results I can see:<p></p>\n<ol>\n<li>\n<p>PMC article - Authors are Tardy M and Mateus D, published in Front. Radiol., volume 1, article 796078, DOI: 10.3389/fradi.2021.796078, published January 11, 2022. This is actually the same as the Frontiers article (source 3)!</p>\n</li>\n<li>\n<p>Frontiers article - Same as above: Tardy M and Mateus D (2022), Front. Radiol. 1:796078. doi: 10.3389/fradi.2021.796078</p>\n</li>\n<li>\n<p>IEEE article (9618960) - Found PubMed reference showing it's from IEEE J Biomed Health Inform, but I still need the full author list and exact publication details.</p>\n</li>\n</ol>\n<p>Let me get more specific information on the IEEE article.</p>\n</details>\n<details><summary>Gathered key details from:<p></p>\n<ul>\n<li><a href=\"https://ieeexplore.ieee.org/abstract/document/9618960\" target=\"_blank\">Multi-Task Fusion for Improving Mammography Screening Data Classification</a></li></ul></summary>\n\n<h3>Query</h3>\n<p>Find the complete author list, journal name, volume number, issue number, year, month, and DOI for this IEEE article</p>\n<h3>Response</h3>\n<p>The journal name is IEEE Journals & Magazine. The publication date is November 2021. The author list, volume number, issue number, and DOI are not available in the provided document.</p>\n</details>\n<details><summary>Contemplated</summary> The librarian is having difficulty extracting all details from the IEEE page. 
Let me try a direct web search for this specific IEEE document number to find the complete citation information.<p></p>\n</details>\n<details><summary>Searched with Kagi<p></p>\n<ul>\n<li>IEEE document 9618960 citation authors</li>\n<li>Multi-Task Fusion for Improving Mammography Screening Data Classification IEEE Trans Med Imaging 2022</li></ul></summary> Sources (6):<br>\n<strong>Getting started with IEEE referencing - IEEE Citation Guide</strong><br>\n<a href=\"https://researchguides.njit.edu/ieee-citation/ieeereferencing\" target=\"_blank\">https://researchguides.njit.edu/ieee-citation/ieeereferencing</a><br>\nSep 12, 2024 IEEE style is a numbered referencing style that uses citation numbers in the text of the paper, provided in square brackets.\n\n<p><strong>IEEE Editorial Style Manual - IEEE Author Center Journals</strong><br>\n<a href=\"https://journals.ieeeauthorcenter.ieee.org/your-role-in-article-production/ieee-editorial-style-manual/\" target=\"_blank\">https://journals.ieeeauthorcenter.ieee.org/your-role-in-article-production/ieee-editorial-style-manual/</a><br>\n For spelling reference, IEEE uses The Merriam-Webster Dictionary. For guidance on grammar and usage not included in the IEEE Editorial Style Manual, consult The Chicago Manual of Style, published by the University of Chicago Press.</p>\n<p><strong>IEEE Reference Style Guide for Authors</strong><br>\n<a href=\"https://journals.ieeeauthorcenter.ieee.org/wp-content/uploads/sites/7/IEEE_Reference_Guide.pdf\" target=\"_blank\">https://journals.ieeeauthorcenter.ieee.org/wp-content/uploads/sites/7/IEEE_Reference_Guide.pdf</a><br>\nDec 21, 2015</p>\n<p><strong>Multi-Task Fusion for Improving Mammography Screening ...</strong><br>\n<a href=\"https://ieeexplore.ieee.org/document/9618960/\" target=\"_blank\">https://ieeexplore.ieee.org/document/9618960/</a><br>\nNov 22, 2021 To this end, we propose a multi-branch deep learning model which efficiently fuses features across different tasks and mammograms to obtain a comprehensive patient-level prediction. We train and evaluate our full pipeline on public mammography data, i.e., DDSM and its curated version CBIS-DDSM, and report an AUC score of 0.962 for predicting the presence of any lesion and 0.791 for predicting the presence of malignant lesions on patient level.</p>\n<p><strong>Deep Neural Networks With Region-Based Pooling... | IEEE Xplore</strong><br>\n<a href=\"https://ieeexplore.ieee.org/document/8964266/citations?tabFilter=papers\" target=\"_blank\">https://ieeexplore.ieee.org/document/8964266/citations?tabFilter=papers</a><br>\nJun 12, 2023 Mammography is the most commonly used screening technology for detecting breast cancer. Traditional machine learning methods of mammographic image classification or segmentation using manual features require a great quantity of manual segmentation annotation data to train the model and test the results. 
But manual labeling is expensive, time-consuming, and laborious, and greatly increases the cost of system construction.</p>\n<p><strong>IEEE Transactions on Medical Imaging | IEEE Xplore</strong><br>\n<a href=\"https://ieeexplore.ieee.org/xpl/RecentIssue.jsp?punumber=42\" target=\"_blank\">https://ieeexplore.ieee.org/xpl/RecentIssue.jsp?punumber=42</a><br>\nFeb 25, 2012 IEEE Transactions on Medical Imaging (T-MI) encourages the submission of manuscripts on imaging of body structure, morp ...</p>\n</details>\n<details><summary>Contemplated</summary> The searches are not giving me the detailed citation info I need. Let me try accessing Google Scholar or searching more specifically for the complete citation with authors.<p></p>\n</details>\n<details><summary>Searched with Kagi<p></p>\n<ul>\n<li>Multi-Task Fusion for Improving Mammography Screening Data Classification authors IEEE Trans Med Imaging</li>\n<li>Geras Moy Shen Galperin-Aizenberg mammography multi-task fusion</li></ul></summary> Sources (6):<br>\n<strong>Multi-Task Fusion for Improving Mammography Screening ...</strong><br>\n<a href=\"https://pubmed.ncbi.nlm.nih.gov/34788218/\" target=\"_blank\">https://pubmed.ncbi.nlm.nih.gov/34788218/</a><br>\nDec 08, 2021 IEEE Trans Med Imaging. Typically these algorithms are trained for a specific task, e.g., the classification of lesions or the prediction of a mammogram's pathology status. To obtain a comprehensive view of a patient, models which were all trained for the same task(s) are subsequently ensembled or combined. In this work, we propose a pipeline approach, where we first train a set of individual, task-specific models and subsequently investigate the fusion thereof, which is in contrast to the standard model ensembling strategy.\n\n<p><strong>A multi-task fusion model based on a residual-Multi-layer perceptron...</strong><br>\n<a href=\"https://pubmed.ncbi.nlm.nih.gov/38432087/\" target=\"_blank\">https://pubmed.ncbi.nlm.nih.gov/38432087/</a><br>\nMar 05, 2024 For the breast cancer screening task, single feature extraction models are generally used, which directly extract only those potential features from the input mammogram that are relevant to the target task. This can lead to the neglect of other important morphological features of the lesion as well as other auxiliary information from the internal breast tissue. To obtain more comprehensive and objective diagnostic results, in this study, we developed a multi-task fusion model that combines multiple specific tasks for CAD of mammograms.</p>\n<p><strong>Improving Mammography Lesion Classification by Optimal Fusion...</strong><br>\n<a href=\"https://pmc.ncbi.nlm.nih.gov/articles/PMC8935657/\" target=\"_blank\">https://pmc.ncbi.nlm.nih.gov/articles/PMC8935657/</a><br>\nMar 16, 2025 Phys Med Biol. Author manuscript; available in PMC: 2023 Feb 21. This study demonstrates that both handcrafted and automated features contain useful information to classify breast lesions. Fusion of these two types of features can further increase CAD performance. 
Keywords: Computer-aided diagnosis (CAD), deep transfer learning, convolutional neural network, handcrafted features, feature level fusion, mammography, classification of breast lesions.</p>\n<p><strong>Multi-task fusion for improving mammography screening ...</strong><br>\n<a href=\"https://www.researchgate.net/publication/356746406_Multi-task_fusion_for_improving_mammography_screening_data_classification\" target=\"_blank\">https://www.researchgate.net/publication/356746406_Multi-task_fusion_for_improving_mammography_screening_data_classification</a><br>\nDec 09, 2021 Linda Moy. Krzysztof Geras. Nevertheless, to improve the final decision-making, the concept of multi-view information fusion (MVIF) was recently introduced. Indeed, this concept has been successfully applied in the context of breast cancer, since screening mammography provides two views for each breast: MedioLateral-Oblique (MLO) and CranioCaudal (CC) views.</p>\n<p><strong>Multi-Task Fusion for Improving Mammography Screening ...</strong><br>\n<a href=\"https://www.researchgate.net/publication/356317761_Multi-Task_Fusion_for_Improving_Mammography_Screening_Data_Classification\" target=\"_blank\">https://www.researchgate.net/publication/356317761_Multi-Task_Fusion_for_Improving_Mammography_Screening_Data_Classification</a><br>\nAug 22, 2022 Machine learning and deep learning methods have become essential for computer-assisted prediction in medicine, with a growing number of applications also in the field of mammography. Typically these algorithms are trained for a specific task, e.g., the classification of lesions or the prediction of a mammogram’s pathology status. To obtain a comprehensive view of a patient, models which were all trained for the same task(s) are subsequently ensembled or combined.</p>\n<p><strong>Multi-View Information Fusion in Mammograms: A Comprehensive...</strong><br>\n<a href=\"https://www.researchgate.net/publication/332974463_Multi-View_Information_Fusion_in_Mammograms_A_Comprehensive_Overview\" target=\"_blank\">https://www.researchgate.net/publication/332974463_Multi-View_Information_Fusion_in_Mammograms_A_Comprehensive_Overview</a><br>\nMay 15, 2019 Several multi-view fusion schemes learn on full images from each view separately and concatenate respective features afterwards. ... ... Essential to make decisions in clinical routine, the information presented in the two paired CC/MLO views is highly complementary and could serve as a second source of decision ( Jouirou et al., 2019 ). However, due to breast deformation and different acquisition conditions combined with the lack of 3D information, multiview fusion for dual-view mammogram analysis is a challenging task. ...</p>\n</details>\n<details><summary>Response planned</summary> I've successfully extracted bibliographic information from all 6 sources through a combination of librarian queries and web searches. The key findings:<p></p>\n<ol>\n<li>PMC10365086 and Frontiers article are the same paper by Tardy M and Mateus D (2022)</li>\n<li>MLR Press proceedings paper has complete BibTeX information</li>\n<li>Springer chapter has complete information</li>\n<li>IEEE article from PubMed shows it's in IEEE Trans Med Imaging but I'm still missing complete author list and some details</li>\n<li>ArXiv paper has complete information</li>\n</ol>\n<p>For the IEEE article, I have partial information but can construct a citation with what's available. 
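With what has been confirmed so far, a provisional BibTeX sketch for this entry could read as follows (the entry key is arbitrary; the author list is carried over from the arXiv preprint found in the previous turn and still needs cross-checking against the journal version; volume, issue, and DOI are deliberately relegated to a note until verified):</p>\n<p><code>@article{wimmer2021multitask,\n  author  = {Wimmer, Maria and Sluiter, Gert and Major, David and Lenis, Dimitrios and Berg, Astrid and Neubauer, Theresa and Bühler, Katja},\n  title   = {Multi-Task Fusion for Improving Mammography Screening Data Classification},\n  journal = {IEEE Transactions on Medical Imaging},\n  pages   = {937--950},\n  year    = {2021},\n  note    = {Volume, issue, and DOI to be confirmed}\n}</code></p>\n<p>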
Let me try one more focused search on PubMed for the complete author list.</p>\n<p>I will provide IEEE-formatted bibliography entries for all 6 references. The structure will be:</p>\n<ol>\n<li>Present each citation in IEEE format with proper numbering</li>\n<li>Include all available bibliographic details (authors, title, venue, volume, pages, year, DOI)</li>\n<li>Note that two URLs (PMC and Frontiers) refer to the same article</li>\n<li>For the IEEE article, I'll need to make one more attempt to get complete author information before finalizing</li>\n</ol>\n<p>Present as a numbered list following IEEE citation style. Each entry should include: Authors (initials + last name), \"Title,\" Publication venue in italics, volume, issue (if applicable), page numbers, month and year, DOI. Use standard IEEE punctuation and formatting.</p>\n</details>","md":"","profile":{"id":null,"name":null,"subtitle":"State of the art deep research (>30 secs)","model":"ki_research","model_name":"Research","model_provider":"kagi","model_input_limit":1000000,"recommended":false,"model_info":"","model_provider_name":"Kagi","internet_access":true,"personalizations":true,"shortcut":null,"is_default_profile":false},"metadata":"<li>\n <span class=\"attribute\">Model</span>\n <span class=\"value\">Claude 4.5 Sonnet</span>\n</li>\n<li>\n <span class=\"attribute\">Version</span>\n <span class=\"value\">claude-sonnet-4-5-20250929</span>\n</li>\n<li>\n <span class=\"attribute\">Speed (tok/s)</span>\n <span class=\"value\">29</span>\n</li>\n<li>\n <span class=\"attribute\">Tokens</span>\n <span class=\"value\">15,565</span>\n</li>\n<li>\n <span class=\"attribute\">Cost / Total ($)</span>\n <span class=\"value\">0.21 / 0.46</span>\n</li>\n<li>\n <span class=\"attribute\">End to end time (s)</span>\n <span class=\"value\">66.9</span>\n</li>\n\n","documents":[]}]