\documentclass[10pt]{article}
\usepackage{a4}
\usepackage{amsmath}

\oddsidemargin=0pt           % No extra space wasted after first inch.
\evensidemargin=0pt          % Ditto (for two-sided output).
\topmargin=0pt               % Same for top of page.
\headheight=0pt              % Don't waste any space on unused headers.
\headsep=0pt
\textwidth=16cm              % Effectively controls the right margin.
\textheight=24cm

\begin{document}
\centerline{\Large \bf Notes on TPG}
\bigskip
\centerline{\large May 2016}

\section{Optimisation of layer weights}
Each event $e$ gives some values $d_{e,i}$ of the deposited energy in 
layer $i$; these
can be in any units, e.g. MIPs.
Assume these are to be multiplied by some constant coefficients $a_i$ (which
are approximately the integrated dE/dx values if the $d_{e,i}$ are in MIPs)
to give the estimate of the incoming EM photon or electron energy. 
Hence, the energy
estimation for event $e$ is
\begin{equation} 
E_e = \sum_i a_i d_{e,i}
\end{equation} 
To find the optimal coefficients, we need to know the truth energy per
event $T_e$. For a given set of coefficients, the RMS$^2$ of the energy 
around the truth value is given by
\begin{equation} 
\mathrm{RMS}^2 = \frac{1}{N} \sum_e (E_e - T_e)^2
= \frac{1}{N} \sum_e \left(\sum_i a_i d_{e,i} - T_e\right)^2
\end{equation} 
This can be thought of as similar to a chi-squared; we want to minimise this
expression. If all the $a_i$ are considered as independent parameters (so 28 for
the EE only), then explicitly
\begin{equation} 
\frac{\partial \mathrm{RMS}^2}{\partial a_j}
= \frac{1}{N} \sum_e 2d_{e,j} \left(\sum_i a_i d_{e,i} - T_e\right)
= \frac{2}{N} \sum_i a_i \left(\sum_e d_{e,j} d_{e,i} \right)
- \frac{2}{N} \sum_e d_{e,j} T_e 
\end{equation} 
Hence, for the minimum, we require
\begin{equation} 
\sum_i \frac{\sum_e d_{e,j} d_{e,i}}{N} a_i
= \frac{\sum_e d_{e,j} T_e}{N} 
\end{equation} 
Writing in matrix notation with $M$ and $v$ defined as
\begin{equation} 
M_{ji} = \frac{\sum_e d_{e,j} d_{e,i}}{N},\qquad
v_j = \frac{\sum_e d_{e,j} T_e}{N}
\end{equation} 
then the requirement is
\begin{equation} 
M a = v\qquad\mathrm{so}\qquad a = M^{-1}v
\end{equation}
Inverting the large matrix $M$ is required to give the solution for the
optimal $a_i$.
Note, $M$ is similar (but not identical) to the error matrix of the $d_i$.
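
As an illustration, the solution can be sketched numerically as below
(a minimal sketch assuming NumPy; the array names \texttt{d} and \texttt{T}
are illustrative, not from any existing code):
\begin{verbatim}
import numpy as np

def fit_layer_weights(d, T):
    # d: (N, n_layers) array of per-event deposits d[e, i], e.g. in MIPs.
    # T: (N,) array of truth energies T[e].
    N = d.shape[0]
    M = d.T @ d / N            # M[j, i] = sum_e d[e, j] d[e, i] / N
    v = d.T @ T / N            # v[j]    = sum_e d[e, j] T[e]    / N
    a = np.linalg.solve(M, v)  # solves M a = v without forming M^-1
    return a
\end{verbatim}
Solving the linear system directly is numerically preferable to inverting
$M$ explicitly, although the result is the same.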

The resulting RMS using the best fit values is given by
\begin{eqnarray*}
\mathrm{RMS}^2_\mathrm{min}
&=& \frac{1}{N} \sum_e \left[
\left(\sum_i a_i d_{e,i} \right)^2
- 2 T_e \sum_i a_i d_{e,i} + T_e^2 \right] \\
&=& \frac{1}{N} \sum_j \sum_i a_j a_i \sum_e d_{e,j} d_{e,i}
- \frac{2}{N} \sum_i a_i \sum_e T_e d_{e,i} + \frac{1}{N} \sum_e T_e^2 \\
&=& \sum_j \sum_i a_j a_i M_{ji}
- 2 \sum_i a_i v_i + \frac{1}{N} \sum_e T_e^2 
= a^T M a - 2 a^T v + \frac{1}{N} \sum_e T_e^2 
\end{eqnarray*}
But since the solution is defined by $Ma=v$, then $a^T M a = a^T v$. Hence
\begin{equation} 
\mathrm{RMS}^2_\mathrm{min} = \frac{1}{N} \left(\sum_e T_e^2\right) - a^T M a
= \frac{1}{N} \left(\sum_e T_e^2\right) - v^T M^{-1} v
= \frac{1}{N} \left(\sum_e T_e^2\right) - a^T v
\end{equation}
The above can be extended slightly, which may improve the energy response 
linearity as well as the RMS. The energy estimation for the event (i.e. the 
first equation in this section) can be generally considered to be a polynomial 
in the $d_{e,i}$, but with the quadratic and higher terms neglected. However,
it also neglects any constant term. A more general expression would then be
to add another coefficient $b$ to give
\begin{equation} 
E_e = b + \sum_i a_i d_{e,i}
\end{equation} 
The easiest way to handle this is to allow the index $i$ to go one higher than
previously, specifically changing from $i=0,\ldots,27$ to $i=0,\ldots,28$, and
then defining $a_{28}=b$ and $d_{e,28}=1$. This means the expression simplifies to
\begin{equation} 
E_e = \sum_{i=0}^{28} a_i d_{e,i}
\end{equation} 
and so an identical calculation to previously holds, simply with the index 
running
over a larger range. Explicitly,
the matrix $M$ is now $29\times 29$ with the extra elements being
\begin{equation} 
M_{i,28} = M_{28,i} = \frac{1}{N} \sum_e d_{e,28}d_{e,i} 
= \frac{1}{N} \sum_e d_{e,i}
\end{equation} 
and
\begin{equation} 
M_{28,28} = \frac{1}{N} \sum_e d_{e,28}d_{e,28} = 1
\end{equation} 
while the extra element in $v$ is
\begin{equation} 
v_{28} = \frac{1}{N} \sum_e T_e d_{e,28} = \frac{1}{N} \sum_e T_e
\end{equation} 
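
A sketch of this extension under the same assumptions as the earlier code:
a column of ones is appended to the deposit array so the constant $b$ is
fitted as the extra coefficient, and $\mathrm{RMS}^2_\mathrm{min}$ is
evaluated from the expression derived above:
\begin{verbatim}
import numpy as np

def fit_with_constant(d, T):
    N = d.shape[0]
    d1 = np.hstack([d, np.ones((N, 1))])  # d[e, 28] = 1 for the constant
    M = d1.T @ d1 / N
    v = d1.T @ T / N
    a = np.linalg.solve(M, v)             # a[-1] is the constant term b
    rms2_min = np.mean(T**2) - a @ v      # RMS^2_min = <T^2> - a^T v
    return a, rms2_min
\end{verbatim}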

\section{Units}
The aim is to keep all quantities as 16-bit integers.

The FE ASIC works in fC with an overall LSB of 0.1\,fC and an upper range of
10\,pC $= 10^4$\,fC. This requires 17 bits in total (although represented as
a 10-bit and a 12-bit pair of values).

Reconstructed energy (not deposited energy) with an LSB of 10\,MeV and a 16-bit 
unsigned representation gives a maximum energy of 655\,GeV. These are 
initially MIPs $\times \int (dE/dx)\,dx$ for each layer
until after forming the 3D clusters,
when the total energy is set more exactly.

Position in $x$ and $y$ with an LSB of 100\,$\mu$m and a 16-bit signed 
representation gives a range of $\pm 328$\,cm (with $\pm 190$\,cm required).
If needed, $z$ can be represented in a 16-bit unsigned representation with the 
same LSB, giving a range up to 655\,cm (with 408\,cm required).
Note, the endcaps are handled
separately so the negative $z$ endcap can be treated like the positive
$z$ one.

Sines and cosines can be represented in a 16-bit signed representation 
where they are multiplied by $2^{15}$. Hence, the result of a multiplication
by this value needs to be stored in up to 31 bits and then bitshifted down
by 15. Note, this does not allow a representation of exactly $+1$, 
i.e. for angles of 0
or $\pi/2$; neither of these should occur in the HGC. Similarly, if needed,
$\tan\theta$ is in the approximate range $\pm 0.1$ to $\pm 0.5$ and so can
be represented in the same way (and hence is similar to $\sin\theta$ for small
angles). Hence, the scaled variables $x/z = \tan\theta \cos\phi$ and
$y/z = \tan\theta \sin\phi$ can also have the same representation.
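
A minimal sketch of this fixed-point arithmetic (plain Python integers
standing in for hardware registers; the function names are illustrative):
\begin{verbatim}
import math

SCALE = 1 << 15                 # sines and cosines multiplied by 2^15

def sin_fixed(angle):
    # 16-bit signed representation of sin(angle)
    return int(round(math.sin(angle) * SCALE))

def scale_coordinate(x, s):
    # Product of a 16-bit coordinate and a scaled sine/cosine needs
    # up to 31 bits; shifting down by 15 restores the original LSB.
    return (x * s) >> 15
\end{verbatim}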

\section{FE ASIC TOT non-linearity}
Modelled as a response $r$ for an input charge $q$ given by
\begin{equation} 
r = 0\quad\mathrm{for}\ q<100\,\mathrm{fC},
\qquad r = q - \frac{100(100-q_0)}{q-q_0} 
= q \left[1 - \frac{100(100-q_0)}{q(q-q_0)}\right]
\quad \mathrm{otherwise}
\end{equation} 
where the value of the parameter is chosen to be $q_0=90$\,fC.
For $q(q-q_0) \gg 100(100-q_0)=1000$\,fC$^2$,
the non-linear term becomes negligible. 
E.g. for $q=200$\,fC, then $q(q-q_0) = 22000$\,fC$^2$ and so there is a 5\%
correction, while for $q=400$\,fC, then $q(q-q_0) = 124000$\,fC$^2$ and so a
0.8\% correction.

Inverting the above response function for $q \ge 100$\,fC, then
\begin{equation} 
r(q-q_0) = q(q-q_0) - 100(100-q_0)
\qquad\mathrm{so}\qquad
q^2-q(q_0+r)+rq_0 -100(100-q_0) = 0
\end{equation} 
Hence
\begin{equation} 
q = \frac{1}{2}\left[q_0+r \pm \sqrt{(q_0+r)^2-4rq_0+400(100-q_0)}\right]
=\frac{q_0+r}{2} \pm \sqrt{\left(\frac{q_0-r}{2}\right)^2 + 100(100-q_0)}
\end{equation} 
where the positive sign is required for $q>100$\,fC.
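
A minimal sketch of this response model and its inversion (plain Python,
with charges in fC and $q_0=90$\,fC as above):
\begin{verbatim}
import math

Q0 = 90.0  # fC

def tot_response(q):
    # Model response r for input charge q; zero below 100 fC.
    if q < 100.0:
        return 0.0
    return q - 100.0 * (100.0 - Q0) / (q - Q0)

def tot_invert(r):
    # Positive root of the quadratic, valid for q >= 100 fC.
    disc = ((Q0 - r) / 2.0) ** 2 + 100.0 * (100.0 - Q0)
    return (Q0 + r) / 2.0 + math.sqrt(disc)
\end{verbatim}
For example, \texttt{tot\_invert(tot\_response(200.0))} recovers 200\,fC.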

\section{Link data representation}
The selected trigger cell data are calculated to a large number of bits,
typically 16--18. On the links, they need to be represented in a small
number of bits $n$, typically $\sim 8$. This could be linear, logarithmic
or floating point.

\subsection{Linear representation}
For linear, the general case is linear between $x_\mathrm{min}$ and
$x_\mathrm{max}$, saturating at 0 or $2^n-1$ outside this range. Within
the range, this can be represented by
\begin{equation}
y = \frac{(2^n-1)(x-x_\mathrm{min})}{x_\mathrm{max}-x_\mathrm{min}}
\end{equation} 
which can be inverted to give
\begin{equation}
x = x_\mathrm{min} + \frac{y(x_\mathrm{max}-x_\mathrm{min})}{2^n-1}
\end{equation} 
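
A sketch of the corresponding encode/decode pair (plain Python; \texttt{n}
is the number of output bits and values outside the range saturate):
\begin{verbatim}
def lin_encode(x, xmin, xmax, n):
    if x <= xmin:
        return 0
    if x >= xmax:
        return 2**n - 1
    return int((2**n - 1) * (x - xmin) / (xmax - xmin))

def lin_decode(y, xmin, xmax, n):
    return xmin + y * (xmax - xmin) / (2**n - 1)
\end{verbatim}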

\subsection{Logarithmic representation}
For logarithmic, the general case would be $y=a\log(x)+b$ but, with
$c=b/a$ and $x_\mathrm{min}=e^{-c}$, then
\begin{equation}
y = a\log(x)+b = a\log(x)+ac = a(\log(x)+c) = a(\log(x)-\log(x_\mathrm{min}))
= a \log(x/x_\mathrm{min})
\end{equation} 
Fixing $a$ by requiring $y=2^n-1$ at $x=x_\mathrm{max}$ then gives
\begin{equation}
y = (2^n-1) \frac{\log(x/x_\mathrm{min})}{\log(x_\mathrm{max}/x_\mathrm{min})}
\end{equation} 
This can be inverted to give
\begin{equation}
x = x_\mathrm{min}\left(\frac{x_\mathrm{max}}{x_\mathrm{min}}\right)^{y/(2^n-1)}
\end{equation} 
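
The equivalent sketch for the logarithmic case (same conventions as the
linear pair above):
\begin{verbatim}
import math

def log_encode(x, xmin, xmax, n):
    if x <= xmin:
        return 0
    if x >= xmax:
        return 2**n - 1
    return int((2**n - 1) * math.log(x / xmin) / math.log(xmax / xmin))

def log_decode(y, xmin, xmax, n):
    return xmin * (xmax / xmin) ** (y / (2**n - 1))
\end{verbatim}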

\subsection{Float representation}
Here, the $2^n$ values are split into an exponent of $E$ bits and a mantissa of
$M$ bits. The naive approach is simply to take the actual value as the
mantissa shifted up by the exponent value. For example, for $E=2$ and $M=2$,
the 16 possible values would give the table below.

\bigskip
\begin{center}
\begin{tabular}{c|c|c|c}
\hline
Representation & Exponent & Mantissa & Value \cr\hline
 0 & 0b00 & 0b00 & 0b00000 = \phantom{2}0\cr
 1 & 0b00 & 0b01 & 0b00001 = \phantom{2}1\cr
 2 & 0b00 & 0b10 & 0b00010 = \phantom{2}2\cr
 3 & 0b00 & 0b11 & 0b00011 = \phantom{2}3\cr
 4 & 0b01 & 0b00 & 0b00000 = \phantom{2}0\cr
 5 & 0b01 & 0b01 & 0b00010 = \phantom{2}2\cr
 6 & 0b01 & 0b10 & 0b00100 = \phantom{2}4\cr
 7 & 0b01 & 0b11 & 0b00110 = \phantom{2}6\cr
 8 & 0b10 & 0b00 & 0b00000 = \phantom{2}0\cr
 9 & 0b10 & 0b01 & 0b00100 = \phantom{2}4\cr
10 & 0b10 & 0b10 & 0b01000 = \phantom{2}8\cr
11 & 0b10 & 0b11 & 0b01100 = 12 \cr
12 & 0b11 & 0b00 & 0b00000 = \phantom{2}0\cr
13 & 0b11 & 0b01 & 0b01000 = \phantom{2}8\cr
14 & 0b11 & 0b10 & 0b10000 = 16 \cr
15 & 0b11 & 0b11 & 0b11000 = 24 \cr
\hline
\end{tabular}
\end{center}

It is clear this is neither monotonic nor efficient, as the same values appear
for several representations.
A better representation is made by realising that, for all but the lowest
exponent value, there is always a leading 1 bit, so this does not
have to be stored explicitly. This means the leading bit must be added back
to the mantissa before the bit shift and, since this increments the mantissa
length by one bit, the shift up needed is only the exponent value minus one.
The table below shows this improved representation. It is monotonic, there
are no duplicates, and the lowest two exponent ranges give an exact
representation.

\bigskip
\begin{center}
\begin{tabular}{c|c|c|c}
\hline
Representation & Exponent & Mantissa & Value \cr\hline
 0 & 0b00 & 0b00 & 0b00000 = \phantom{2}0\cr
 1 & 0b00 & 0b01 & 0b00001 = \phantom{2}1\cr
 2 & 0b00 & 0b10 & 0b00010 = \phantom{2}2\cr
 3 & 0b00 & 0b11 & 0b00011 = \phantom{2}3\cr
 4 & 0b01 & 0b00 & 0b00100 = \phantom{2}4\cr
 5 & 0b01 & 0b01 & 0b00101 = \phantom{2}5\cr
 6 & 0b01 & 0b10 & 0b00110 = \phantom{2}6\cr
 7 & 0b01 & 0b11 & 0b00111 = \phantom{2}7\cr
 8 & 0b10 & 0b00 & 0b01000 = \phantom{2}8\cr
 9 & 0b10 & 0b01 & 0b01010 = 10 \cr
10 & 0b10 & 0b10 & 0b01100 = 12 \cr
11 & 0b10 & 0b11 & 0b01110 = 14 \cr
12 & 0b11 & 0b00 & 0b10000 = 16 \cr
13 & 0b11 & 0b01 & 0b10100 = 20 \cr
14 & 0b11 & 0b10 & 0b11000 = 24 \cr
15 & 0b11 & 0b11 & 0b11100 = 28 \cr
\hline
\end{tabular}
\end{center}

In this improved representation, the mantissa has $M+1$ bits (except in
the lowest exponent range). The exponent can represent numbers up to
$2^E-1$ and hence will bit shift by a maximum of $2^E-2$ bits.
Hence, the number of bits in the representation is $M+E$, while
the maximum number represented has $M+1+2^E-2 = M+2^E-1$ bits, i.e. it
is less than $2^{M+2^E-1}$. The reduction is $2^E-E-1$ bits.
For the example of $M=2$, $E=2$ 
in the table above, this gives $2+4-1=5$ bits, i.e.
numbers up to $2^5=32$ as shown, and the reduction is 1 bit.
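
An encode/decode sketch for this improved representation (plain Python;
truncation rather than rounding is assumed when mantissa bits are dropped):
\begin{verbatim}
def float_encode(x, E, M):
    if x < (1 << M):               # lowest exponent range is exact
        return x                   # exponent 0, mantissa x
    e = x.bit_length() - M         # exponent value, 1 .. 2^E - 1
    assert e < (1 << E)            # else x exceeds the representable range
    m = (x >> (e - 1)) - (1 << M)  # drop the implicit leading bit
    return (e << M) | m

def float_decode(code, E, M):
    e = code >> M
    m = code & ((1 << M) - 1)
    if e == 0:
        return m
    return (m + (1 << M)) << (e - 1)  # restore leading bit and shift
\end{verbatim}
E.g. \texttt{float\_encode(28, 2, 2)} gives 15 and
\texttt{float\_decode(15, 2, 2)} returns 28, matching the last row of the
table above.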

For the extreme values of $E$, then $E=0$ and $E=1$ both give an exact
representation as they only use the lowest range or two lowest ranges,
respectively. The reduction is $2^0-0-1=0$ and $2^1-1-1=0$ bits in both cases. 
Explicitly, for $E=0$, then the representation has $M$ bits
while the value represented has $M$
bits also, i.e. the reduction is 0 bits.
For $E=1$, the representation has $M+1$ bits, while the 
value represented has $M+1$ bits also, again with a reduction of 0 bits.

For the extreme value of $M=0$, then the representation is just the number
of bits in the input word. E.g. for $M=0$, $E=4$, then the
table is given below. The value range is less than $2^{15}=32768$.

\bigskip
\begin{center}
\begin{tabular}{c|c|c|c}
\hline
Representation & Exponent & Mantissa & Value \cr\hline
 0 & 0b0000 & 0 & 0b000000000000000 = \phantom{1222}0\cr
 1 & 0b0001 & 0 & 0b000000000000001 = \phantom{1222}1\cr
 2 & 0b0010 & 0 & 0b000000000000010 = \phantom{1222}2\cr
 3 & 0b0011 & 0 & 0b000000000000100 = \phantom{1222}4\cr
 4 & 0b0100 & 0 & 0b000000000001000 = \phantom{1222}8\cr
 5 & 0b0101 & 0 & 0b000000000010000 = \phantom{122}16\cr
 6 & 0b0110 & 0 & 0b000000000100000 = \phantom{122}32\cr
 7 & 0b0111 & 0 & 0b000000001000000 = \phantom{122}64\cr
 8 & 0b1000 & 0 & 0b000000010000000 = \phantom{12}128\cr
 9 & 0b1001 & 0 & 0b000000100000000 = \phantom{12}256 \cr
10 & 0b1010 & 0 & 0b000001000000000 = \phantom{12}512 \cr
11 & 0b1011 & 0 & 0b000010000000000 = \phantom{1}1024 \cr
12 & 0b1100 & 0 & 0b000100000000000 = \phantom{1}2048 \cr
13 & 0b1101 & 0 & 0b001000000000000 = \phantom{1}4096 \cr
14 & 0b1110 & 0 & 0b010000000000000 = \phantom{1}8192 \cr
15 & 0b1111 & 0 & 0b100000000000000 = 16384 \cr
\hline
\end{tabular}
\end{center}

The maximum bit lengths of the value, i.e. $M+2^E-1$, for various values of 
$M$ and $E$ are shown in the table below.

\bigskip
\begin{center}
\begin{tabular}{r||c|c|c|c|c|c|c|c|c}
\hline
$M=$ & 0 & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 \cr\hline
$E=0$ & 0 & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 \cr
1 & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 \cr
2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 & 10 & 11 \cr
3 & 7 & 8 & 9 & 10 & 11 & 12 & 13 & 14 & 15 \cr
4 & 15 & 16 & 17 & 18 & 19 & 20 & 21 & 22 & 23 \cr
5 & 31 & 32 & 33 & 34 & 35 & 36 & 37 & 38 & 39 \cr
6 & 63 & 64 & 65 & 66 & 67 & 68 & 69 & 70 & 71 \cr
7 & 127 & 128 & 129 & 130 & 131 & 132 & 133 & 134 & 135 \cr
8 & 255 & 256 & 257 & 258 & 259 & 260 & 261 & 262 & 263 \cr
\hline
\end{tabular}
\end{center}



\section{Template fit of energy in depth}
Assume a template shape for a photon of $P_l$ per GeV of photon energy
for layer $l$.
The minimum bias gives a template shape of $M_l$ for layer $l$ in some
arbitrary units. The total expected per layer will then be
$E_l = E_pP_l + E_m M_l$ for a photon energy $E_p$ and some scaling $E_m$ of the
minimum bias template. Hence, the chi-squared compared to the observed
energy $O_l$ will be
\begin{equation} 
\chi^2 = \sum_l \frac{(E_p P_l + E_m M_l - O_l)^2}{\sigma_l^2}
\end{equation}
The $\sigma_l$ are given by the photon and minimum bias shower
fluctuations around
the average of the template. As such, the errors will depend on $E_p$ and
$E_m$. However, ``expected'' values can be used initially to fix the
$\sigma_l$ so that the problem remains linear. It could then be iterated 
several times with improved values to get a better fit.

Minimising the chi-squared requires
\begin{equation} 
\frac{d\chi^2}{dE_p} = \sum_l \frac{2P_l(E_p P_l + E_m M_l - O_l)}{\sigma_l^2}=0,\qquad
\frac{d\chi^2}{dE_m} = \sum_l \frac{2M_l(E_p P_l + E_m M_l - O_l)}{\sigma_l^2}=0
\end{equation}
such that
\begin{equation} 
E_p \sum_l \frac{P_l^2}{\sigma_l^2} + E_m\sum_l \frac{P_lM_l}{\sigma_l^2}
= \sum_l \frac{O_l P_l}{\sigma_l^2},
\qquad
E_p \sum_l \frac{P_l M_l}{\sigma_l^2} + E_m\sum_l \frac{M_l^2}{\sigma_l^2}
= \sum_l \frac{O_l M_l}{\sigma_l^2},
\end{equation}
This can be written as a matrix equation
\begin{equation} 
\begin{pmatrix}
\sum_l \frac{P_l^2}{\sigma_l^2} & \sum_l \frac{P_l M_l}{\sigma_l^2} \\
\sum_l \frac{P_l M_l}{\sigma_l^2} & \sum_l \frac{M_l^2}{\sigma_l^2}
\end{pmatrix}
\begin{pmatrix}
E_p \\ E_m
\end{pmatrix} =
\begin{pmatrix}
\sum_l \frac{O_l P_l}{\sigma_l^2} \\
\sum_l \frac{O_l M_l}{\sigma_l^2}
\end{pmatrix}
\end{equation}
As long as the $P_l$ and $M_l$ are not proportional to each other, the
matrix on the left can be inverted to solve for $E_p$ (and $E_m$).
This matrix is a constant for all events and so can be precalculated and
inverted once, offline. The vector on the right must be calculated per
event. However, explicitly the matrix determinant is
\begin{equation} 
\Delta = \left(\sum_l \frac{P_l^2}{\sigma_l^2}\right)
\left(\sum_l \frac{M_l^2}{\sigma_l^2}\right)
- \left(\sum_l \frac{P_l M_l}{\sigma_l^2} \right)^2
\end{equation}
so the inverse is
\begin{equation} 
\frac{1}{\Delta}
\begin{pmatrix}
\sum_l \frac{M_l^2}{\sigma_l^2} & -\sum_l \frac{P_l M_l}{\sigma_l^2} \\
-\sum_l \frac{P_l M_l}{\sigma_l^2} & \sum_l \frac{P_l^2}{\sigma_l^2}
\end{pmatrix}
\end{equation}
and hence
\begin{equation} 
\begin{pmatrix}
E_p \\ E_m
\end{pmatrix} =
\frac{1}{\Delta}
\begin{pmatrix}
\sum_{l^\prime} \frac{M_{l^\prime}^2}{\sigma_{l^\prime}^2} & -\sum_{l^\prime} \frac{P_{l^\prime} M_{l^\prime}}{\sigma_{l^\prime}^2} \\
-\sum_{l^\prime} \frac{P_{l^\prime} M_{l^\prime}}{\sigma_{l^\prime}^2} & \sum_{l^\prime} \frac{P_{l^\prime}^2}{\sigma_{l^\prime}^2}
\end{pmatrix}
\begin{pmatrix}
\sum_l \frac{O_l P_l}{\sigma_l^2} \\
\sum_l \frac{O_l M_l}{\sigma_l^2}
\end{pmatrix}
\end{equation}
which means
\begin{equation} 
\begin{pmatrix}
E_p \\ E_m
\end{pmatrix} =
\begin{pmatrix}
\sum_l O_l 
\left[\frac{P_l}{\Delta \sigma_l^2}
\left( \sum_{l^\prime} \frac{M_{l^\prime}^2}{\sigma_{l^\prime}^2}\right)
- \frac{M_l}{\Delta \sigma_l^2}
\left( \sum_{l^\prime} \frac{P_{l^\prime} M_{l^\prime}}{\sigma_{l^\prime}^2}\right) \right]\\
\sum_l O_l 
\left[\frac{M_l}{\Delta \sigma_l^2}
\left(\sum_{l^\prime} \frac{P_{l^\prime}^2}{\sigma_{l^\prime}^2}\right)
-\frac{P_l}{\Delta \sigma_l^2}
\left(\sum_{l^\prime} \frac{P_{l^\prime} M_{l^\prime}}{\sigma_{l^\prime}^2}\right)\right]
\end{pmatrix}
\end{equation}
Hence
\begin{equation} 
E_p = \sum_l O_l A_l,\qquad
E_m = \sum_l O_l B_l
\end{equation}
where $A_l$ and $B_l$ correspond to the quantities in the square brackets
and can be precalculated, except for any subtleties with the errors.
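
A numerical sketch of this fit (assuming NumPy; \texttt{P}, \texttt{Mb},
\texttt{sig} and \texttt{O} are illustrative names for the per-layer photon
template, minimum-bias template, fixed errors and observed energies):
\begin{verbatim}
import numpy as np

def template_fit(P, Mb, sig, O):
    w = 1.0 / sig**2                      # per-layer weights 1/sigma_l^2
    SPP = np.sum(w * P * P)
    SMM = np.sum(w * Mb * Mb)
    SPM = np.sum(w * P * Mb)
    delta = SPP * SMM - SPM**2            # determinant of the 2x2 matrix
    A = w * (P * SMM - Mb * SPM) / delta  # E_p = sum_l O_l A_l
    B = w * (Mb * SPP - P * SPM) / delta  # E_m = sum_l O_l B_l
    return O @ A, O @ B
\end{verbatim}
Since \texttt{A} and \texttt{B} depend only on the templates and the fixed
$\sigma_l$, they can be precalculated offline as noted above, leaving two
dot products per event.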

ERROR MATRIX

\section{Comparing coordinates in plane polars}
On a given layer, comparing e.g. a track extrapolation to a cluster 
position requires the difference of the two points in 2D,
$(x_1, y_1)$ and $(x_2, y_2)$.
This should be done
in coordinates which preserve the cylindrical (i.e. plane polar for a layer)
geometry. The obvious two are
\begin{equation} 
\Delta\rho = \rho_2-\rho_1 = \sqrt{x_2^2+y_2^2}-\sqrt{x_1^2+y_1^2},
\qquad
\Delta\phi = \phi_2 - \phi_1 = \tan^{-1}(y_2/x_2) - \tan^{-1}(y_1/x_1)
\end{equation}
However, $\Delta\phi$ has two issues: firstly, it is not a length variable
and so makes comparison with $\Delta\rho$ difficult; secondly, there is
a discontinuity in $\phi$ which needs to be handled.

A length variable can be formed using some radius value $\overline{\rho}$
to give $\overline{\rho}\Delta\phi$
but there is an ambiguity about which radius to use; $\rho_1$, $\rho_2$ or
some average of these. One desirable property is that the two
variables should preserve the total length, i.e.
\begin{equation} 
\Delta\rho^2 + \overline{\rho}^2\Delta\phi^2
= \Delta x^2 + \Delta y^2 = (x_2-x_1)^2 + (y_2-y_1)^2
= x_2^2+y_2^2+x_1^2+y_1^2 - 2(x_1x_2+y_1y_2)
\end{equation}
Using the expression for $\Delta\rho$ above, then
\begin{equation} 
\Delta\rho^2 = x_2^2+y_2^2+x_1^2+y_1^2 - 2\sqrt{x_2^2+y_2^2}\sqrt{x_1^2+y_1^2}
\end{equation}
so that
\begin{equation} 
\overline{\rho}^2\Delta\phi^2
=2\sqrt{x_2^2+y_2^2}\sqrt{x_1^2+y_1^2}- 2(x_1x_2+y_1y_2)
\end{equation}
Expressing the right hand side in plane polars gives
\begin{equation} 
\overline{\rho}^2\Delta\phi^2
=2\rho_1\rho_2 - 2\rho_1\rho_2 (\cos\phi_1\cos\phi_2 + \sin\phi_1\sin\phi_2)
=2\rho_1\rho_2 [1-\cos(\phi_2-\phi_1)]=2\rho_1\rho_2 (1-\cos\Delta\phi)
\end{equation}
This effectively defines $\overline{\rho}$ and hence the second variable
directly. Note there is no issue with the $\phi$ discontinuity as this is
handled automatically by the cosine.
For small $\Delta\phi$, then the above expression is approximated by
\begin{equation} 
\overline{\rho}^2\Delta\phi^2
\approx 2\rho_1\rho_2 \frac{\Delta\phi^2}{2}
\approx \rho_1\rho_2 \Delta\phi^2
\end{equation}
so that $\overline{\rho} \approx \sqrt{\rho_1\rho_2}$, i.e. the geometric
mean.

Note that the sign of
the second variable is not defined by the above; it should be the same as 
the sign of $\Delta\phi$. However, since
\begin{equation} 
1 - \cos\Delta\phi = 2\sin^2\left(\frac{\Delta\phi}{2}\right)
\end{equation}
then
\begin{equation} 
\overline{\rho}^2\Delta\phi^2
=4\rho_1\rho_2 \sin^2\left(\frac{\Delta\phi}{2}\right)
\end{equation}
and so
\begin{equation} 
\overline{\rho}\Delta\phi
=2\sqrt{\rho_1\rho_2} \sin\left(\frac{\Delta\phi}{2}\right)
\end{equation}
where the positive sign for the square-root is taken to agree with $\Delta\phi$.
Again, for small $\Delta\phi$, then $\overline{\rho}$
clearly approximates to the geometric
mean of the two radii, as before.
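
A sketch of these two comparison variables (plain Python; the function name
is illustrative):
\begin{verbatim}
import math

def polar_differences(x1, y1, x2, y2):
    rho1 = math.hypot(x1, y1)
    rho2 = math.hypot(x2, y2)
    dphi = math.atan2(y2, x2) - math.atan2(y1, x1)
    dphi = math.atan2(math.sin(dphi), math.cos(dphi))  # wrap to (-pi, pi]
    drho = rho2 - rho1
    # 2 sqrt(rho1 rho2) sin(dphi/2), signed like dphi itself
    dperp = 2.0 * math.sqrt(rho1 * rho2) * math.sin(0.5 * dphi)
    return drho, dperp
\end{verbatim}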

\section{Shower position and direction fit}

\section{Motion in a magnetic field}

\section{Inverting matrices}
A symmetric $3\times 3$ matrix can be written as
\begin{equation} 
M=
\begin{pmatrix}
M_{00} & M_{01} & M_{02} \\
M_{01} & M_{11} & M_{12} \\
M_{02} & M_{12} & M_{22} 
\end{pmatrix}
\end{equation}
Its determinant is then
\begin{eqnarray} 
\Delta
&=&
M_{00} \begin{vmatrix}
M_{11} & M_{12} \\
M_{12} & M_{22} 
\end{vmatrix}
-M_{01} \begin{vmatrix}
M_{01} & M_{12} \\
M_{02} & M_{22} 
\end{vmatrix}
+M_{02} \begin{vmatrix}
M_{01} & M_{11} \\
M_{02} & M_{12} 
\end{vmatrix}\\
&=& M_{00}M_{11}M_{22}-M_{00}M_{12}M_{12}
 -M_{01}M_{01}M_{22}+M_{01}M_{12}M_{02}
 +M_{02}M_{01}M_{12}-M_{02}M_{11}M_{02}\\
&=& M_{00}M_{11}M_{22}+2M_{01}M_{12}M_{02}
-M_{00}M_{12}^2 -M_{11}M_{02}^2 -M_{22}M_{01}^2
\end{eqnarray}
This can be written in several ways
\begin{eqnarray} 
\Delta
&=& M_{00}(M_{11}M_{22}-M_{12}^2)
+M_{01}(M_{12}M_{02}-M_{22}M_{01})
+M_{02}(M_{01}M_{12}-M_{11}M_{02})\\
&=& M_{01}(M_{12}M_{02}-M_{22}M_{01})
+M_{11}(M_{00}M_{22}-M_{02}^2)
+M_{12}(M_{01}M_{02}-M_{00}M_{12})\\
&=&M_{02}(M_{01}M_{12}-M_{11}M_{02})
+M_{12}(M_{01}M_{02}-M_{00}M_{12})
+M_{22}(M_{00}M_{11}-M_{01}^2)
\end{eqnarray}
which means the inverse must be
\begin{equation} 
M^{-1}=
\frac{1}{\Delta}
\begin{pmatrix}
M_{11}M_{22}-M_{12}^2 & M_{12}M_{02}-M_{22}M_{01} & M_{01}M_{12}-M_{11}M_{02} \\
M_{12}M_{02}-M_{22}M_{01} & M_{00}M_{22}-M_{02}^2 & M_{01}M_{02}-M_{00}M_{12} \\
M_{01}M_{12}-M_{11}M_{02} & M_{01}M_{02}-M_{00}M_{12} & M_{00}M_{11}-M_{01}^2
\end{pmatrix}
\end{equation}
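
A direct transcription of this cofactor inverse (plain Python; the input is
assumed symmetric and non-singular):
\begin{verbatim}
def invert_sym3(m):
    # Cofactors of the symmetric 3x3 matrix m[i][j]
    c00 = m[1][1]*m[2][2] - m[1][2]**2
    c01 = m[1][2]*m[0][2] - m[2][2]*m[0][1]
    c02 = m[0][1]*m[1][2] - m[1][1]*m[0][2]
    c11 = m[0][0]*m[2][2] - m[0][2]**2
    c12 = m[0][1]*m[0][2] - m[0][0]*m[1][2]
    c22 = m[0][0]*m[1][1] - m[0][1]**2
    det = m[0][0]*c00 + m[0][1]*c01 + m[0][2]*c02
    return [[c00/det, c01/det, c02/det],
            [c01/det, c11/det, c12/det],
            [c02/det, c12/det, c22/det]]
\end{verbatim}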
Note, if variable 2 becomes uncorrelated with variables 0 and 1, then 
$M_{02}=M_{12}=0$ so
\begin{equation} 
\Delta
=M_{00}M_{11}M_{22}-M_{22}M_{01}^2= M_{22}(M_{00}M_{11}-M_{01}^2) = M_{22}\Delta_2
\end{equation}
and
\begin{equation} 
M^{-1}=
\frac{1}{\Delta}
\begin{pmatrix}
M_{11}M_{22} & -M_{22}M_{01} & 0 \\
-M_{22}M_{01} & M_{00}M_{22} & 0 \\
0 & 0 & M_{00}M_{11}-M_{01}^2
\end{pmatrix}
=\frac{1}{\Delta_2}
\begin{pmatrix}
M_{11} & -M_{01} & 0 \\
-M_{01} & M_{00} & 0 \\
0 & 0 & \Delta_2/M_{22}
\end{pmatrix}
=
\begin{pmatrix}
M_2^{-1} & 0 \\
0 & 1/M_{22}
\end{pmatrix}
\end{equation}
as expected. Note that, in general, the inverse of the $2\times 2$ submatrix
is not the corresponding submatrix of the inverse of the $3\times 3$ matrix.
E.g. the first element in the inverse of the $2\times 2$ matrix is
\begin{equation} 
M_{00}^{-1} 
= \frac{M_{11}}{M_{00}M_{11}-M_{01}^2}
\end{equation}
while in the $3 \times 3$ case, this is 
\begin{eqnarray}
M_{00}^{-1} 
&=& \frac{M_{11}M_{22}-M_{12}^2}{M_{00}M_{11}M_{22}+2M_{01}M_{12}M_{02}
-M_{00}M_{12}^2 -M_{11}M_{02}^2 -M_{22}M_{01}^2}\\
&=& \frac{M_{11}-(M_{12}^2/M_{22})}{M_{00}M_{11}-M_{01}^2
+(2M_{01}M_{12}M_{02} -M_{00}M_{12}^2 -M_{11}M_{02}^2)/M_{22}}
\end{eqnarray}

\end{document}