% IMPORTANT: The following is UTF-8 encoded. This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
@ARTICLE{Pillai:824688,
author = {Pillai, Rishi and Galiullin, Timur and Chyrkin, Anton and
Quadakkers, Willem J.},
title = {{M}ethods to {I}ncrease {C}omputational {E}fficiency of
{CALPHAD}-{B}ased {T}hermodynamic and {K}inetic {M}odels
{E}mployed in {D}escribing {H}igh {T}emperature {M}aterial
{D}egradation},
journal = {Calphad},
volume = {53},
issn = {0364-5916},
address = {Amsterdam [et al.]},
publisher = {Elsevier Science},
reportid = {FZJ-2016-07246},
pages = {62--71},
year = {2016},
abstract = {Coupled thermodynamic and kinetic models rely on the
thermodynamic and mobility databases, which are compiled from
critically assessed thermodynamic and diffusivity data
acquired from various experimental sources. A continuous
influx of experimental thermodynamic and kinetic data means
that the respective databases have increased not only in
complexity but also in size. The time and computational
effort for the equilibrium calculations increase with the
number of components and phases to be considered. In the
present work, several methods for increasing the
computational efficiency of coupled thermodynamic and
kinetic models were investigated.
Three cases of varying complexity in terms of the number of
phases, alloying elements and phenomena to be modelled were
considered for demonstration. The computationally intensive
thermodynamic calculations were distributed over multiple
computing cores using MPI (Message Passing Interface).
The interpolation scheme for dynamic storage of
thermodynamic data available in the commercial software
DICTRA was employed on a single computing core and the
resulting performance was compared with the MPI
computations. Additionally, the interpolation scheme was
parallelised to test its scaling capability in comparison
to computations performed solely with MPI. A linear scaling
of the computation speed was observed with parallelisation
of the thermodynamic calculations using MPI.
However, the degree of scaling was dependent on the
complexity of the calculation. Compared with MPI on 48
cores, the interpolation scheme on a single core was 20
times faster in one case but about 20–50 times slower in
the other two cases. Parallelisation of the interpolation
scheme improved its performance in these two cases;
however, its computational scaling remained poor compared
to the MPI computations.},
cin = {IEK-2},
ddc = {540},
cid = {I:(DE-Juel1)IEK-2-20101013},
pnm = {111 - Efficient and Flexible Power Plants (POF3-111) /
HITEC - Helmholtz Interdisciplinary Doctoral Training in
Energy and Climate Research (HITEC) (HITEC-20170406)},
pid = {G:(DE-HGF)POF3-111 / G:(DE-Juel1)HITEC-20170406},
typ = {PUB:(DE-HGF)16},
UT = {WOS:000377315100008},
doi = {10.1016/j.calphad.2016.03.004},
url = {https://juser.fz-juelich.de/record/824688},
}