@article {13533, title = {Memory and language cognitive data harmonization across the {United States} and {Mexico}.}, journal = {Alzheimer{\textquoteright}s \& Dementia (Amsterdam, Netherlands)}, volume = {15}, year = {2023}, pages = {e12478}, abstract = {

INTRODUCTION: We used cultural neuropsychology-informed procedures to derive and validate harmonized scores representing memory and language across population-based studies in the United States and Mexico.

METHODS: Data were from the Health and Retirement Study Harmonized Cognitive Assessment Protocol (HRS-HCAP) and the Mexican Health and Aging Study (MHAS) Ancillary Study on Cognitive Aging (Mex-Cog). We statistically co-calibrated memory and language domains and performed differential item functioning (DIF) analysis using a cultural neuropsychological approach. We examined relationships among harmonized scores, age, and education.

RESULTS: We included 3170 participants from the HRS-HCAP (age~=~76.6 [standard deviation (SD): 7.5], 60\% female) and 2042 participants from the Mex-Cog (age~=~68.1 [SD: 9.0], 59\% female). Five of seven memory items and one of twelve language items demonstrated DIF by study. Harmonized memory and language scores showed expected associations with age and education.

DISCUSSION: A cultural neuropsychological approach to harmonization facilitates the generation of harmonized measures of memory and language function in cross-national studies.

HIGHLIGHTS: We harmonized memory and language scores across studies in the United States and Mexico. A cultural neuropsychological approach to data harmonization was used. Harmonized scores showed minimal measurement differences between cohorts. Future work can use these harmonized scores for cross-national studies of Alzheimer{\textquoteright}s disease and related dementias.

}, keywords = {Alzheimer{\textquoteright}s disease, cognitive aging, cross-cultural, cultural neuropsychology, harmonization}, issn = {2352-8729}, doi = {10.1002/dad2.12478}, author = {Arce Renter{\'\i}a, Miguel and Brice{\~n}o, Emily M and Chen, Diefei and Saenz, Joseph and Kobayashi, Lindsay C and Gonzalez, Christopher and Vonk, Jet M J and Jones, Richard N and Manly, Jennifer J and Wong, Rebeca and Weir, David R and Langa, Kenneth M. and Gross, Alden L} } @article {12699, title = {Shifting of Cognitive Assessments Between Face-to-Face and Telephone Administration: Measurement Considerations.}, journal = {The Journals of Gerontology, Series B}, volume = {78}, year = {2023}, pages = {191--200}, abstract = {

OBJECTIVES: Telephone-administered cognitive assessments are a cost-effective and sometimes necessary alternative to face-to-face assessments. There is limited information in large studies concerning mode effects, or differences in cognition attributable to assessment method, as a potential measurement threat. We evaluated mode effects on cognitive scores using a population-based sample of community-living older adults.

METHODS: We used data from participants aged 65-79 in the 2014 Health and Retirement Study for whom interview mode was randomized (n=6825). We assessed mode differences in test means, whether mode modifies associations of cognition with criterion variables, and formal measurement invariance testing.

RESULTS: Relative to face-to-face assessment, telephone assessment was associated with higher scores for memory and calculation (0.06 to 0.013 standard deviations (SD)) and lower scores for non-memory items (-0.09 to -0.01 SD). Cognition was significantly differentially related to IADL difficulty depending on assessment mode. Measurement invariance testing identified evidence of mode differences in certain tests as a function of mode: adjusting for underlying cognition, the largest mode differences in memory and attention: immediate noun recall, delayed word recall, and serial-7s scores were higher given telephone administration.

DISCUSSION: Differences by mode of administration are apparent in cognitive measurement in older adults albeit to a small degree in our study, and most pronounced for tests of memory and attention. The importance of accounting for mode differences ultimately depends on one{\textquoteright}s research question and study sample: not all associations may be affected by mode differences and such modification may only be apparent among those with lower cognitive functioning.

}, keywords = {Cognition, Mode effects, Psychometrics, Telephone}, issn = {1758-5368}, doi = {10.1093/geronb/gbac135}, author = {Smith, Jason R and Gibbons, Laura E and Crane, Paul K and Mungas, Dan M and Glymour, M Maria and Manly, Jennifer J and Zahodne, Laura B and Mayeda, Elizabeth Rose and Jones, Richard N and Gross, Alden L} } @article {12666, title = {Cross-national harmonization of cognitive measures across {HRS} {HCAP} ({USA}) and {LASI-DAD} ({India}).}, journal = {PLoS One}, volume = {17}, year = {2022}, pages = {e0264166}, abstract = {

BACKGROUND: As global populations age, cross-national comparisons of cognitive health and dementia risk are increasingly valuable. It remains unclear, however, whether country-level differences in cognitive function are attributable to population differences or bias due to incommensurate measurement. To demonstrate an effective method for cross-national comparison studies, we aimed to statistically harmonize measures of episodic memory and language function across two population-based cohorts of older adults in the United States (HRS HCAP) and India (LASI-DAD).

METHODS: Data for 3,496 HRS HCAP (>=65 years) and 3,152 LASI-DAD (>=60 years) participants were statistically harmonized for episodic memory and language performance using confirmatory factor analysis (CFA) methods. Episodic memory and language factor variables were investigated for differential item functioning (DIF) and precision.

RESULTS: CFA models estimating episodic memory and language domains based on a priori adjudication of comparable items fit the data well. DIF analyses revealed that four out of ten episodic memory items and five out of twelve language items measured the underlying construct comparably across samples. DIF-modified episodic memory and language factor scores showed comparable patterns of precision across the range of the latent trait for each sample.

CONCLUSIONS: Harmonization of cognitive measures will facilitate future investigation of cross-national differences in cognitive performance and differential effects of risk factors, policies, and treatments, reducing study-level measurement and administrative influences. As international aging studies become more widely available, advanced statistical methods such as those described in this study will become increasingly central to making universal generalizations and drawing valid conclusions about cognitive aging of the global population.

}, keywords = {Cognition, cognitive aging, Episodic, HCAP, India, Language, LASI-DAD, Memory, Neuropsychological tests}, issn = {1932-6203}, doi = {10.1371/journal.pone.0264166}, author = {Vonk, Jet M J and Gross, Alden L and Zammit, Andrea R and Bertola, Laiss and Avila, Justina F and Jutten, Roos J and Gaynor, Leslie S and Suemoto, Claudia K and Kobayashi, Lindsay C and O{\textquoteright}Connell, Megan E and Elugbadebo, Olufisayo and Amofa, Priscilla A and Staffaroni, Adam M and Arce Renter{\'\i}a, Miguel and Turney, Indira C and Jones, Richard N and Manly, Jennifer J and Lee, Jinkook and Zahodne, Laura B} } @article {11244, title = {You say tomato, I say radish: can brief cognitive assessments in the {US} Health and Retirement Study be harmonized with its International Partner Studies?}, journal = {The Journals of Gerontology, Series B}, volume = {76}, year = {2021}, pages = {1767--1776}, abstract = {

OBJECTIVES: To characterize the extent to which brief cognitive assessments administered in the population-representative US Health and Retirement Study (HRS) and its International Partner Studies can be considered to be measuring a single, unidimensional latent cognitive function construct.

METHOD: Cognitive function assessments were administered in face-to-face interviews in 12 studies in 26 countries (N=155,690), including the US HRS and selected International Partner Studies. We used the time point of first cognitive assessment for each study to minimize differential practice effects across studies, and documented cognitive test item coverage across studies. Using confirmatory factor analysis models, we estimated single factor general cognitive function models, and bifactor models representing memory-specific and non-memory-specific cognitive domains for each study. We evaluated model fits and factor loadings across studies.

RESULTS: Despite relatively sparse and inconsistent cognitive item coverage across studies, all studies had some cognitive test items in common with other studies. In all studies, the bifactor models with a memory-specific domain fit better than single factor general cognitive function models. The data fit the models at reasonable thresholds for single factor models in six of the 12 studies, and for the bifactor models in all 12 of the 12 studies.

DISCUSSION: The cognitive assessments in the US HRS and its International Partner Studies reflect comparable underlying cognitive constructs. We discuss the assumptions underlying our methods, present alternatives, and future directions for cross-national harmonization of cognitive aging data.

}, keywords = {cognitive function, health survey, international comparison, item response theory, statistical harmonization}, issn = {1758-5368}, doi = {10.1093/geronb/gbaa205}, author = {Kobayashi, Lindsay C and Gross, Alden L and Gibbons, Laura E and Tommet, Doug and Sanders, R Elizabeth and Choi, Seo-Eun and Mukherjee, Shubhabrata and Glymour, M. Maria and Manly, Jennifer J and Berkman, Lisa F and Crane, Paul K and Mungas, Dan M and Jones, Richard N} } @article {10355, title = {The Health and Retirement Study Harmonized Cognitive Assessment Protocol Project: Study Design and Methods}, journal = {Neuroepidemiology}, year = {2020}, abstract = {Introduction: The Harmonized Cognitive Assessment Protocol (HCAP) Project is a substudy within the Health and Retirement Study (HRS), an ongoing nationally representative panel study of about 20,000 adults aged 51 or older in the United States. The HCAP is part of an international research collaboration funded by the National Institute on Aging to better measure and identify cognitive impairment and dementia in representative population-based samples of older adults, in the context of ongoing longitudinal studies of aging in high-, middle-, and low-income countries around the world. Methods: The HCAP cognitive test battery was designed to measure a range of key cognitive domains affected by cognitive aging (including attention, memory, executive function, language, and visuospatial function) and to allow harmonization and comparisons to other studies in the United States and around the world. The HCAP included a pair of in-person interviews, one with the target HRS respondent (a randomly selected HRS sample member, aged 65+) that lasted approximately 1 h and one with an informant nominated by the respondent that lasted approximately 20 min. The final HRS HCAP sample included 3,496 study subjects, representing a 79\% response rate among those invited to participate. 
Conclusion: Linking detailed HCAP cognitive assessments to the wealth of available longitudinal HRS data on cognition, health, biomarkers, genetics, health care utilization, informal care, and economic resources and behavior will provide unique and expanded opportunities to study cognitive impairment and dementia in a nationally representative US population-based sample. The fielding of similar HCAP projects in multiple countries around the world will provide additional opportunities to study international differences in the prevalence, incidence, and outcomes of dementia globally with comparable data. Like all HRS data, HCAP data are publicly available at no cost to researchers.}, keywords = {Cognition, cognitive assessment, study design}, issn = {0251-5350}, doi = {10.1159/000503004}, author = {Langa, Kenneth M. and Ryan, Lindsay H and McCammon, Ryan J and Jones, Richard N and Manly, Jennifer J and Levine, Deborah A and Sonnega, Amanda and Farron, M. and Weir, David R} } @article {7320, title = {Telephone interview for cognitive status: Creating a crosswalk with the {Mini-Mental State Examination}.}, journal = {Alzheimer{\textquoteright}s \& Dementia}, volume = {5}, year = {2009}, month = nov, pages = {492--497}, abstract = {

BACKGROUND: Brief cognitive screening measures are valuable tools for both research and clinical applications. The most widely used instrument, the Mini-Mental State Examination (MMSE), is limited in that it must be administered face-to-face, cannot be used in participants with visual or motor impairments, and is protected by copyright. Screening instruments such as the Telephone Interview for Cognitive Status (TICS) were developed to provide a valid alternative, with comparable cut-point scores to rate global cognitive function.

METHODS: The MMSE, TICS-30, and TICS-40 scores from 746 community-dwelling elders who participated in the Aging, Demographics, and Memory Study (ADAMS) were analyzed with equipercentile equating, a statistical process of determining comparable scores based on percentile equivalents for different forms of an examination.

RESULTS: Scores from the MMSE and TICS-30 and TICS-40 corresponded well, and clinically relevant cut-point scores were determined. For example, an MMSE score of 23 is equivalent to 17 and 20 on the TICS-30 and TICS-40, respectively.

CONCLUSIONS: These findings indicate that TICS and MMSE scores can be linked directly. Clinically relevant and important MMSE cut points and the respective ADAMS TICS-30 and TICS-40 cut-point scores are included, to identify the degree of cognitive impairment among respondents with any type of cognitive disorder. These results will help in the widespread application of TICS in both research and clinical practice.

}, keywords = {Aged, Aged, 80 and over, Alzheimer disease, Cognition Disorders, Disability Evaluation, Female, Geriatric Assessment, Health Status, Humans, Interviews as Topic, Male, Mass Screening, Models, Statistical, Neuropsychological tests, Predictive Value of Tests, Psychiatric Status Rating Scales, Remote Consultation, Reproducibility of Results, Sensitivity and Specificity}, issn = {1552-5279}, doi = {10.1016/j.jalz.2009.02.007}, author = {Fong, Tamara G and Fearing, Michael A and Jones, Richard N and Shi, Peilin and Marcantonio, Edward R and Rudolph, James L and Yang, Frances Margaret and Kiely, Dan K and Inouye, Sharon K} }