% IMPORTANT: The following is UTF-8 encoded. This means that in the presence
% of non-ASCII characters, it will not work with BibTeX 0.99 or older.
% Instead, you should use an up-to-date BibTeX implementation like “bibtex8” or
% “biber”.
@INPROCEEDINGS{Wybo:1008841,
author = {Wybo, Willem and Tran, Viet Anh Khoa and Tsai, Matthias and
Illing, Bernd and Jordan, Jakob and Senn, Walter and
Morrison, Abigail},
title = {{D}endritic modulation for multitask representation
learning in deep feedforward networks},
reportid = {FZJ-2023-02506},
year = {2023},
abstract = {Feedforward sensory processing in the brain is generally
construed as proceeding through a hierarchy of layers, each
constructing increasingly abstract and invariant
representations of sensory inputs. This interpretation is at
odds with the observation that activity in sensory
processing layers is heavily modulated by contextual
signals, such as cross-modal information or internal mental
states [1]. While it is tempting to assume that such
modulations bias the feedforward processing pathway towards
detection of relevant input features given a context, this
induces a dependence on the contextual state in hidden
representations at any given layer. The next processing
layer in the hierarchy thus has to be able to extract
relevant information for each possible context. For this
reason, most machine learning approaches to multitask
learning apply task-specific output networks to
context-independent representations of the inputs, generated
by a shared trunk network. Here, we show that a network
motif, where a layer of modulated hidden neurons targets an
output neuron through task-independent feedforward weights,
solves multitask learning problems, and that this network
motif can be implemented with biophysically realistic
neurons that receive context-modulating synaptic inputs on
dendritic branches. The dendritic synapses in this motif
evolve according to a Hebbian plasticity rule modulated by a
global error signal. We then embed such a motif in each
layer of a deep feedforward network, where it generates
task-modulated representations of sensory inputs. To learn
feedforward weights to the next layer in the network, we
apply a contrastive learning objective that predicts whether
representations originate from different inputs or from
different task-modulations of the same input. This
self-supervised approach results in deep representation
learning of feedforward weights that accommodate a multitude
of contexts, without relying on error backpropagation
between layers.},
month = {Mar},
date = {2023-03-08},
organization = {Cosyne 2023, Montreal (Canada), 8 Mar
2023 - 16 Mar 2023},
subtyp = {After Call},
cin = {INM-6 / IAS-6 / INM-10},
cid = {I:(DE-Juel1)INM-6-20090406 / I:(DE-Juel1)IAS-6-20130828 /
I:(DE-Juel1)INM-10-20170113},
pnm = {5232 - Computational Principles (POF4-523) / HBP SGA1 -
Human Brain Project Specific Grant Agreement 1 (720270) /
HBP SGA2 - Human Brain Project Specific Grant Agreement 2
(785907) / HBP SGA3 - Human Brain Project Specific Grant
Agreement 3 (945539) / SDS005 - Towards an integrated data
science of complex natural systems (PF-JARA-SDS005) /
neuroIC002 - Recurrence and stochasticity for neuro-inspired
computation (EXS-SF-neuroIC002)},
pid = {G:(DE-HGF)POF4-5232 / G:(EU-Grant)720270 /
G:(EU-Grant)785907 / G:(EU-Grant)945539 /
G:(DE-Juel-1)PF-JARA-SDS005 / G:(DE-82)EXS-SF-neuroIC002},
typ = {PUB:(DE-HGF)24},
doi = {10.34734/FZJ-2023-02506},
url = {https://juser.fz-juelich.de/record/1008841},
}
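
% The abstract above describes a dendritically modulated network motif with
% Hebbian plasticity and a contrastive inter-layer objective. The Python
% sketch below (placed between entries, where BibTeX ignores it) is a loose,
% hypothetical illustration of those ideas, not the authors' implementation:
% all names, functional forms, and hyperparameters are assumptions.
%
% import numpy as np
%
% rng = np.random.default_rng(0)
% n_in, n_hid, n_ctx = 20, 50, 3
%
% W = rng.normal(0, 1 / np.sqrt(n_in), (n_hid, n_in))  # shared feedforward weights
% M = rng.normal(0, 0.1, (n_hid, n_ctx))               # dendritic (context) weights
% v = rng.normal(0, 1 / np.sqrt(n_hid), n_hid)         # task-independent readout
%
% def forward(x, c):
%     """Hidden units: feedforward drive gated by a context-dependent gain,
%     standing in for dendritic modulation of biophysical neurons."""
%     drive = np.tanh(W.dot(x))          # somatic feedforward input
%     gain = 1.0 + np.tanh(M.dot(c))     # multiplicative dendritic modulation
%     h = gain * drive
%     y = v.dot(h)                       # single output neuron
%     return h, y
%
% def hebbian_update(c, h, error, lr=1e-2):
%     """Dendritic plasticity: a Hebbian term (pre x post) scaled by a
%     global error signal, an assumed form of the modulated Hebbian rule."""
%     global M
%     M += lr * error * np.outer(h, c)
%
% def contrastive_score(h_a, h_b):
%     """Cosine similarity for an InfoNCE-style objective (assumed form):
%     different task-modulations of the same input should score higher
%     than representations of different inputs."""
%     return h_a.dot(h_b) / (np.linalg.norm(h_a) * np.linalg.norm(h_b) + 1e-9)
%
% x = rng.normal(size=n_in)
% c = np.eye(n_ctx)[0]                   # one-hot context / task cue
% h, y = forward(x, c)
% hebbian_update(c, h, error=(1.0 - y))  # push output toward a target of 1.0
%
% # Positive pair: same input under another context; negative: different input.
% h_pos, _ = forward(x, np.eye(n_ctx)[1])
% h_neg, _ = forward(rng.normal(size=n_in), c)
% s_pos, s_neg = contrastive_score(h, h_pos), contrastive_score(h, h_neg)
% loss = -np.log(np.exp(s_pos) / (np.exp(s_pos) + np.exp(s_neg)))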