1.\" Automatically generated by Pod::Man v1.34, Pod::Parser v1.13
2.\"
3.\" Standard preamble:
4.\" ========================================================================
5.de Sh \" Subsection heading
6.br
7.if t .Sp
8.ne 5
9.PP
10\fB\\$1\fR
11.PP
12..
13.de Sp \" Vertical space (when we can't use .PP)
14.if t .sp .5v
15.if n .sp
16..
17.de Vb \" Begin verbatim text
18.ft CW
19.nf
20.ne \\$1
21..
22.de Ve \" End verbatim text
23.ft R
24.fi
25..
26.\" Set up some character translations and predefined strings. \*(-- will
27.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
28.\" double quote, and \*(R" will give a right double quote. | will give a
29.\" real vertical bar. \*(C+ will give a nicer C++. Capital omega is used to
30.\" do unbreakable dashes and therefore won't be available. \*(C` and \*(C'
31.\" expand to `' in nroff, nothing in troff, for use with C<>.
32.tr \(*W-|\(bv\*(Tr
33.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
34.ie n \{\
35. ds -- \(*W-
36. ds PI pi
37. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
38. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
39. ds L" ""
40. ds R" ""
41. ds C` ""
42. ds C' ""
43'br\}
44.el\{\
45. ds -- \|\(em\|
46. ds PI \(*p
47. ds L" ``
48. ds R" ''
49'br\}
50.\"
51.\" If the F register is turned on, we'll generate index entries on stderr for
52.\" titles (.TH), headers (.SH), subsections (.Sh), items (.Ip), and index
53.\" entries marked with X<> in POD. Of course, you'll have to process the
54.\" output yourself in some meaningful fashion.
55.if \nF \{\
56. de IX
57. tm Index:\\$1\t\\n%\t"\\$2"
58..
59. nr % 0
60. rr F
61.\}
62.\"
63.\" For nroff, turn off justification. Always turn off hyphenation; it makes
64.\" way too many mistakes in technical documents.
65.hy 0
66.if n .na
67.\"
68.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
69.\" Fear. Run. Save yourself. No user-serviceable parts.
70. \" fudge factors for nroff and troff
71.if n \{\
72. ds #H 0
73. ds #V .8m
74. ds #F .3m
75. ds #[ \f1
76. ds #] \fP
77.\}
78.if t \{\
79. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
80. ds #V .6m
81. ds #F 0
82. ds #[ \&
83. ds #] \&
84.\}
85. \" simple accents for nroff and troff
86.if n \{\
87. ds ' \&
88. ds ` \&
89. ds ^ \&
90. ds , \&
91. ds ~ ~
92. ds /
93.\}
94.if t \{\
95. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
96. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
97. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
98. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
99. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
100. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
101.\}
102. \" troff and (daisy-wheel) nroff accents
103.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
104.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
105.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
106.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
107.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
108.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
109.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
110.ds ae a\h'-(\w'a'u*4/10)'e
111.ds Ae A\h'-(\w'A'u*4/10)'E
112. \" corrections for vroff
113.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
114.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
115. \" for low resolution devices (crt and lpr)
116.if \n(.H>23 .if \n(.V>19 \
117\{\
118. ds : e
119. ds 8 ss
120. ds o a
121. ds d- d\h'-1'\(ga
122. ds D- D\h'-1'\(hy
123. ds th \o'bp'
124. ds Th \o'LP'
125. ds ae ae
126. ds Ae AE
127.\}
128.rm #[ #] #H #V #F C
129.\" ========================================================================
130.\"
131.IX Title "MatrixReal 3"
132.TH MatrixReal 3 "2002-05-15" "perl v5.8.0" "User Contributed Perl Documentation"
133.SH "NAME"
134Math::MatrixReal \- Matrix of Reals
135.PP
136Implements the data type "matrix of reals" (and consequently also
137"vector of reals").
138.SH "DESCRIPTION"
139.IX Header "DESCRIPTION"
140Implements the data type \*(L"matrix of reals\*(R", which can be used almost
141like any other basic Perl type thanks to \fB\s-1OPERATOR\s0 \s-1OVERLOADING\s0\fR, i.e.,
142.PP
143.Vb 1
144\& $product = $matrix1 * $matrix2;
145.Ve
146.PP
147does what you would like it to do (a matrix multiplication).
148.PP
149Also features many important operations and methods: matrix norm,
150matrix transposition, matrix inverse, determinant of a matrix, order
151and numerical condition of a matrix, scalar product of vectors, vector
152product of vectors, vector length, projection of row and column vectors,
153a comfortable way for reading in a matrix from a file, the keyboard or
154your code, and many more.
155.PP
Allows you to solve systems of linear equations using an efficient algorithm
known as \*(L"L\-R\-decomposition\*(R" and several approximate (iterative) methods.
.PP
Features an implementation of Kleene's algorithm to compute the minimal
costs for all paths in a graph with weighted edges (the \*(L"weights\*(R" being
the costs associated with each edge).
.SH "SYNOPSIS"
.IX Header "SYNOPSIS"
.Sh "Constructor Methods And Such"
.IX Subsection "Constructor Methods And Such"
.RE
.IP "\(bu"
\&\f(CW\*(C`use Math::MatrixReal;\*(C'\fR
.PP
Makes the methods and overloaded operators of this module available
to your program.
.RE
.IP "\(bu"
\&\f(CW\*(C`$new_matrix = new Math::MatrixReal($rows,$columns);\*(C'\fR
.PP
The matrix object constructor method. A new matrix of size \f(CW$rows\fR by \f(CW$columns\fR
will be created, with the value \f(CW0.0\fR for all elements.
.PP
Note that this method is implicitly called by many of the other methods
in this module.
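.PP
For instance, a 2 by 3 all-zero matrix:
.PP
.Vb 1
\& $new_matrix = new Math::MatrixReal(2,3);   # 2 rows, 3 columns, all 0.0
.Ve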
.RE
.IP "\(bu"
\&\f(CW\*(C`$new_matrix = $some_matrix\->\*(C'\fR\f(CW\*(C`new($rows,$columns);\*(C'\fR
.PP
Another way of calling the matrix object constructor method.
.PP
Matrix "\f(CW$some_matrix\fR" is not changed by this in any way.
.RE
.IP "\(bu"
\&\f(CW\*(C`$new_matrix = $matrix\->new_from_cols( [ $column_vector|$array_ref|$string, ... ] )\*(C'\fR
.PP
Creates a new matrix given a reference to an array of any of the following:
.IP "\(bu column vectors ( n by 1 Math::MatrixReal matrices )" 4
.IX Item "column vectors ( n by 1 Math::MatrixReal matrices )"
.PD 0
.IP "\(bu references to arrays" 4
.IX Item "references to arrays"
.ie n .IP "\(bu strings properly formatted to create a column with Math::MatrixReal's ""new_from_string"" command" 4
.el .IP "\(bu strings properly formatted to create a column with Math::MatrixReal's \f(CWnew_from_string\fR command" 4
.IX Item "strings properly formatted to create a column with Math::MatrixReal's new_from_string command"
.PD
.PP
You may mix and match these as you wish. However, all must be of the
same dimension\*(--no padding happens automatically. Example:
.PP
.Vb 2
\& my $matrix = Math::MatrixReal->new_from_cols( [ [1,2], [3,4] ] );
\& print $matrix;
.Ve
.PP
will print
.PP
.Vb 2
\& [ 1.000000000000E+00 3.000000000000E+00 ]
\& [ 2.000000000000E+00 4.000000000000E+00 ]
.Ve
.RE
.IP "\(bu"
\&\f(CW\*(C`new_from_rows( [ $row_vector|$array_ref|$string, ... ] )\*(C'\fR
.PP
Creates a new matrix given a reference to an array of any of the following:
.IP "\(bu row vectors ( 1 by n Math::MatrixReal matrices )" 4
.IX Item "row vectors ( 1 by n Math::MatrixReal matrices )"
.PD 0
.IP "\(bu references to arrays" 4
.IX Item "references to arrays"
.ie n .IP "\(bu strings properly formatted to create a row with Math::MatrixReal's ""new_from_string"" command" 4
.el .IP "\(bu strings properly formatted to create a row with Math::MatrixReal's \f(CWnew_from_string\fR command" 4
.IX Item "strings properly formatted to create a row with Math::MatrixReal's new_from_string command"
.PD
.PP
You may mix and match these as you wish. However, all must be of the
same dimension\*(--no padding happens automatically. Example:
.PP
.Vb 2
\& my $matrix = Math::MatrixReal->new_from_rows( [ [1,2], [3,4] ] );
\& print $matrix;
.Ve
.PP
will print
.PP
.Vb 2
\& [ 1.000000000000E+00 2.000000000000E+00 ]
\& [ 3.000000000000E+00 4.000000000000E+00 ]
.Ve
.RE
.IP "\(bu"
\&\f(CW\*(C`$new_matrix = Math::MatrixReal\->new_diag( $array_ref );\*(C'\fR
.PP
This method allows you to create a diagonal matrix by only specifying
the diagonal elements. Example:
.PP
.Vb 2
\& $matrix = Math::MatrixReal->new_diag( [ 1,2,3,4 ] );
\& print $matrix;
.Ve
.PP
will print
.PP
.Vb 4
\& [ 1.000000000000E+00 0.000000000000E+00 0.000000000000E+00 0.000000000000E+00 ]
\& [ 0.000000000000E+00 2.000000000000E+00 0.000000000000E+00 0.000000000000E+00 ]
\& [ 0.000000000000E+00 0.000000000000E+00 3.000000000000E+00 0.000000000000E+00 ]
\& [ 0.000000000000E+00 0.000000000000E+00 0.000000000000E+00 4.000000000000E+00 ]
.Ve
.RE
.IP "\(bu"
\&\f(CW\*(C`$new_matrix = Math::MatrixReal\->\*(C'\fR\f(CW\*(C`new_from_string($string);\*(C'\fR
.PP
This method allows you to read in a matrix from a string (for
instance, from the keyboard, from a file or from your code).
.PP
The syntax is simple: each row must start with "\f(CW\*(C`[ \*(C'\fR\*(L" and end with
\&\*(R"\f(CW\*(C` ]\en\*(C'\fR\*(L" (\*(R"\f(CW\*(C`\en\*(C'\fR\*(L" being the newline character and \*(R"\f(CW\*(C` \*(C'\fR" a space or
tab) and contain one or more numbers, all separated from each other
by spaces or tabs.
.PP
Additional spaces or tabs can be added at will, but no comments.
.PP
Examples:
.PP
.Vb 3
\& $string = "[ 1 2 3 ]\en[ 2 2 -1 ]\en[ 1 1 1 ]\en";
\& $matrix = Math::MatrixReal->new_from_string($string);
\& print "$matrix";
.Ve
.PP
By the way, this prints
.PP
.Vb 3
\& [ 1.000000000000E+00 2.000000000000E+00 3.000000000000E+00 ]
\& [ 2.000000000000E+00 2.000000000000E+00 -1.000000000000E+00 ]
\& [ 1.000000000000E+00 1.000000000000E+00 1.000000000000E+00 ]
.Ve
.PP
But you can also do this in a much more comfortable way using the
shell-like \*(L"here\-document\*(R" syntax:
.PP
.Vb 9
\& $matrix = Math::MatrixReal->new_from_string(<<'MATRIX');
\& [ 1 0 0 0 0 0 1 ]
\& [ 0 1 0 0 0 0 0 ]
\& [ 0 0 1 0 0 0 0 ]
\& [ 0 0 0 1 0 0 0 ]
\& [ 0 0 0 0 1 0 0 ]
\& [ 0 0 0 0 0 1 0 ]
\& [ 1 0 0 0 0 0 -1 ]
\& MATRIX
.Ve
.PP
You can even use variables in the matrix:
.PP
.Vb 3
\& $c1 = 2 / 3;
\& $c2 = -2 / 5;
\& $c3 = 26 / 9;
.Ve
.PP
.Vb 1
\& $matrix = Math::MatrixReal->new_from_string(<<"MATRIX");
.Ve
.PP
.Vb 3
\& [ 3 2 0 ]
\& [ 0 3 2 ]
\& [ $c1 $c2 $c3 ]
.Ve
.PP
.Vb 1
\& MATRIX
.Ve
.PP
(Remember that you may use spaces and tabs to format the matrix to
your taste)
.PP
Note that this method uses exactly the same representation for a
matrix as the \*(L"stringify\*(R" operator "": this means that you can convert
any matrix into a string with \f(CW\*(C`$string = "$matrix";\*(C'\fR and read it back
in later (for instance from a file!).
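.PP
Such a round trip might look like this (a sketch; file handling omitted):
.PP
.Vb 2
\& $string = "$matrix";                                 # matrix to string
\& $copy = Math::MatrixReal->new_from_string($string);  # and back again
.Ve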
.PP
Note however that you may suffer a precision loss in this process
because only 13 digits are supported in the mantissa when printed!!
.PP
If the string you supply (or someone else supplies) does not obey
the syntax mentioned above, an exception is raised, which can be
caught by \*(L"eval\*(R" as follows:
.PP
.Vb 14
\& print "Please enter your matrix (in one line): ";
\& $string = <STDIN>;
\& $string =~ s/\e\en/\en/g;
\& eval { $matrix = Math::MatrixReal->new_from_string($string); };
\& if ($@)
\& {
\& print "$@";
\& # ...
\& # (error handling)
\& }
\& else
\& {
\& # continue...
\& }
.Ve
.PP
or as follows:
.PP
.Vb 7
\& eval { $matrix = Math::MatrixReal->new_from_string(<<"MATRIX"); };
\& [ 3 2 0 ]
\& [ 0 3 2 ]
\& [ $c1 $c2 $c3 ]
\& MATRIX
\& if ($@)
\& # ...
.Ve
.PP
Actually, the method shown above for reading a matrix from the keyboard
is a little awkward, since you have to enter a lot of \*(L"\en\*(R"'s for the
newlines.
.PP
A better way is shown in this piece of code:
.PP
.Vb 13
\& while (1)
\& {
\& print "\enPlease enter your matrix ";
\& print "(multiple lines, <ctrl-D> = done):\en";
\& eval { $new_matrix =
\& Math::MatrixReal->new_from_string(join('',<STDIN>)); };
\& if ($@)
\& {
\& $@ =~ s/\es+at\eb.*?$//;
\& print "${@}Please try again.\en";
\& }
\& else { last; }
\& }
.Ve
.PP
Possible error messages of the \*(L"\fInew_from_string()\fR\*(R" method are:
.PP
.Vb 2
\& Math::MatrixReal::new_from_string(): syntax error in input string
\& Math::MatrixReal::new_from_string(): empty input string
.Ve
.PP
If the input string has rows with varying numbers of columns,
the following warning will be printed to \s-1STDERR:\s0
.PP
.Vb 1
\& Math::MatrixReal::new_from_string(): missing elements will be set to zero!
.Ve
.PP
If everything is okay, the method returns an object reference to the
(newly allocated) matrix containing the elements you specified.
.RE
.IP "\(bu"
\&\f(CW\*(C`$new_matrix = $some_matrix\->shadow();\*(C'\fR
.PP
Returns an object reference to a \fB\s-1NEW\s0\fR but \fB\s-1EMPTY\s0\fR matrix
(filled with zero's) of the \fB\s-1SAME\s0 \s-1SIZE\s0\fR as matrix "\f(CW$some_matrix\fR".
.PP
Matrix "\f(CW$some_matrix\fR" is not changed by this in any way.
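.PP
A one-line illustration (assuming \f(CW$some_matrix\fR already exists):
.PP
.Vb 1
\& $new_matrix = $some_matrix->shadow();   # same size, but all elements 0.0
.Ve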
.RE
.IP "\(bu"
\&\f(CW\*(C`$matrix1\->copy($matrix2);\*(C'\fR
.PP
Copies the contents of matrix "\f(CW$matrix2\fR" to an \fB\s-1ALREADY\s0 \s-1EXISTING\s0\fR
matrix "\f(CW$matrix1\fR\*(L" (which must have the same size as matrix \*(R"\f(CW$matrix2\fR"!).
.PP
Matrix "\f(CW$matrix2\fR" is not changed by this in any way.
.RE
.IP "\(bu"
\&\f(CW\*(C`$twin_matrix = $some_matrix\->clone();\*(C'\fR
.PP
Returns an object reference to a \fB\s-1NEW\s0\fR matrix of the \fB\s-1SAME\s0 \s-1SIZE\s0\fR as
matrix "\f(CW$some_matrix\fR\*(L". The contents of matrix \*(R"\f(CW$some_matrix\fR" have
\&\fB\s-1ALREADY\s0 \s-1BEEN\s0 \s-1COPIED\s0\fR to the new matrix "\f(CW$twin_matrix\fR\*(L". This
is the method that the operator \*(R"=" is overloaded to when you type
\&\f(CW\*(C`$a = $b\*(C'\fR, when \f(CW$a\fR and \f(CW$b\fR are matrices.
.PP
Matrix "\f(CW$some_matrix\fR" is not changed by this in any way.
.Sh "Matrix Row, Column and Element operations"
.IX Subsection "Matrix Row, Column and Element operations"
.RE
.IP "\(bu"
\&\f(CW\*(C`$row_vector = $matrix\->row($row);\*(C'\fR
.PP
This is a projection method which returns an object reference to
a \fB\s-1NEW\s0\fR matrix (which in fact is a (row) vector since it has only
one row) to which row number "\f(CW$row\fR\*(L" of matrix \*(R"\f(CW$matrix\fR" has
already been copied.
.PP
Matrix "\f(CW$matrix\fR" is not changed by this in any way.
.RE
.IP "\(bu"
\&\f(CW\*(C`$column_vector = $matrix\->column($column);\*(C'\fR
.PP
This is a projection method which returns an object reference to
a \fB\s-1NEW\s0\fR matrix (which in fact is a (column) vector since it has
only one column) to which column number "\f(CW$column\fR\*(L" of matrix
\&\*(R"\f(CW$matrix\fR" has already been copied.
.PP
Matrix "\f(CW$matrix\fR" is not changed by this in any way.
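.PP
For example, extracting the second row and the third column as new vectors:
.PP
.Vb 2
\& $row_vector = $matrix->row(2);         # a new 1 by n matrix
\& $column_vector = $matrix->column(3);   # a new m by 1 matrix
.Ve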
.RE
.IP "\(bu"
\&\f(CW\*(C`$matrix\->assign($row,$column,$value);\*(C'\fR
.PP
Explicitly assigns a value "\f(CW$value\fR\*(L" to a single element of the
matrix \*(R"\f(CW$matrix\fR\*(L", located in row \*(R"\f(CW$row\fR\*(L" and column \*(R"\f(CW$column\fR",
thereby replacing the value previously stored there.
.RE
.IP "\(bu"
\&\f(CW\*(C`$value = $matrix\->\*(C'\fR\f(CW\*(C`element($row,$column);\*(C'\fR
.PP
Returns the value of a specific element of the matrix "\f(CW$matrix\fR\*(L",
located in row \*(R"\f(CW$row\fR\*(L" and column \*(R"\f(CW$column\fR".
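.PP
Setting an element and reading it back, for instance:
.PP
.Vb 2
\& $matrix->assign(2,3,42.0);        # store 42.0 in row 2, column 3
\& $value = $matrix->element(2,3);   # $value is now 42.0
.Ve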
.RE
.IP "\(bu"
\&\f(CW\*(C`$new_matrix = $matrix\->each( \e&function )\*(C'\fR;
.PP
Creates a new matrix by evaluating a code reference on each element of the
given matrix. The function is passed the element, the row index and the column
index, in that order. The value the function returns ( or the value of the last
executed statement ) is the value given to the corresponding element in \f(CW$new_matrix\fR.
.PP
Example:
.PP
.Vb 2
\& # add 1 to every element in the matrix
\& $matrix = $matrix->each ( sub { (shift) + 1 } );
.Ve
.PP
Example:
.PP
.Vb 4
\& my $cofactor = $matrix->each( sub { my(undef,$i,$j) = @_;
\& ($i+$j) % 2 == 0 ? $matrix->minor($i,$j)->det()
\& : -1*$matrix->minor($i,$j)->det();
\& } );
.Ve
.PP
This code needs some explanation. For each element of \f(CW$matrix\fR, it throws away the actual value
and stores the row and column indexes in \f(CW$i\fR and \f(CW$j\fR. Then it sets element [$i,$j] in \f(CW$cofactor\fR
to the determinant of \f(CW\*(C`$matrix\->minor($i,$j)\*(C'\fR if it is an \*(L"even\*(R" element, or the negative of
that determinant, \f(CW\*(C`\-1*$matrix\->minor($i,$j)\->det()\*(C'\fR, if it is an \*(L"odd\*(R" element.
.RE
.IP "\(bu"
\&\f(CW\*(C`$new_matrix = $matrix\->each_diag( \e&function )\*(C'\fR;
.PP
Creates a new matrix by evaluating a code reference on each diagonal element of the
given matrix. The function is passed the element, the row index and the column
index, in that order. The value the function returns ( or the value of the last
executed statement ) is the value given to the corresponding element in \f(CW$new_matrix\fR.
.RE
.IP "\(bu"
\&\f(CW\*(C`$matrix\->swap_col( $col1, $col2 );\*(C'\fR
.PP
This method takes two one-based column numbers and swaps the values of each element in each column.
\&\f(CW\*(C`$matrix\->swap_col(2,3)\*(C'\fR would replace column 2 in \f(CW$matrix\fR with column 3, and replace column
3 with column 2.
.RE
.IP "\(bu"
\&\f(CW\*(C`$matrix\->swap_row( $row1, $row2 );\*(C'\fR
.PP
This method takes two one-based row numbers and swaps the values of each element in each row.
\&\f(CW\*(C`$matrix\->swap_row(2,3)\*(C'\fR would replace row 2 in \f(CW$matrix\fR with row 3, and replace row
3 with row 2.
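.PP
A quick sanity check of the row swap, for instance:
.PP
.Vb 3
\& $m = Math::MatrixReal->new_from_rows( [ [1,2], [3,4] ] );
\& $m->swap_row(1,2);
\& print $m;   # rows now appear in the order [3,4], [1,2]
.Ve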
.Sh "Matrix Operations"
.IX Subsection "Matrix Operations"
.RE
.IP "\(bu"
\&\f(CW\*(C`$det = $matrix\->det();\*(C'\fR
.PP
Returns the determinant of the matrix, without going through
the rigamarole of computing an \s-1LR\s0 decomposition. This method should
be much faster than \s-1LR\s0 decomposition if the matrix is diagonal or
triangular. Otherwise, it is just a wrapper for
\&\f(CW\*(C`$matrix\->decompose_LR\->det_LR\*(C'\fR. If the determinant is zero,
there is no inverse and vice\-versa. Only quadratic matrices have
determinants.
.RE
.IP "\(bu"
\&\f(CW\*(C`$inverse = $matrix\->inverse();\*(C'\fR
.PP
Returns the inverse of a matrix, without going through the
rigamarole of computing an \s-1LR\s0 decomposition. If no inverse exists,
undef is returned and an error is printed via \f(CW\*(C`carp()\*(C'\fR.
This is nothing but a wrapper for \f(CW\*(C`$matrix\->decompose_LR\->invert_LR\*(C'\fR.
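.PP
Since undef is returned for singular matrices, a common pattern is to guard
the result before using it (a sketch):
.PP
.Vb 5
\& my $inverse = $matrix->inverse();
\& if ( defined $inverse )
\& {
\&     print $inverse * $matrix;   # (numerically close to) the identity
\& }
.Ve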
.RE
.IP "\(bu"
\&\f(CW\*(C`($rows,$columns) = $matrix\->dim();\*(C'\fR
.PP
Returns a list of two items, representing the number of rows
and columns the given matrix "\f(CW$matrix\fR" contains.
.RE
.IP "\(bu"
\&\f(CW\*(C`$norm_one = $matrix\->norm_one();\*(C'\fR
.PP
Returns the \*(L"one\*(R"\-norm of the given matrix "\f(CW$matrix\fR".
.PP
The \*(L"one\*(R"\-norm is defined as follows:
.PP
For each column, the sum of the absolute values of the elements in the
different rows of that column is calculated. Finally, the maximum
of these sums is returned.
.PP
Note that the \*(L"one\*(R"\-norm and the \*(L"maximum\*(R"\-norm are mathematically
equivalent, although for the same matrix they usually yield a different
value.
.PP
Therefore, you should only compare values that have been calculated
using the same norm!
.PP
Throughout this package, the \*(L"one\*(R"\-norm is (arbitrarily) used
for all comparisons, for the sake of uniformity and comparability,
except for the iterative methods \*(L"\fIsolve_GSM()\fR\*(R", \*(L"\fIsolve_SSM()\fR\*(R" and
\&\*(L"\fIsolve_RM()\fR\*(R" which use either norm depending on the matrix itself.
.RE
.IP "\(bu"
\&\f(CW\*(C`$norm_max = $matrix\->norm_max();\*(C'\fR
.PP
Returns the \*(L"maximum\*(R"\-norm of the given matrix \f(CW$matrix\fR.
.PP
The \*(L"maximum\*(R"\-norm is defined as follows:
.PP
For each row, the sum of the absolute values of the elements in the
different columns of that row is calculated. Finally, the maximum
of these sums is returned.
.PP
Note that the \*(L"maximum\*(R"\-norm and the \*(L"one\*(R"\-norm are mathematically
equivalent, although for the same matrix they usually yield a different
value.
.PP
Therefore, you should only compare values that have been calculated
using the same norm!
.PP
Throughout this package, the \*(L"one\*(R"\-norm is (arbitrarily) used
for all comparisons, for the sake of uniformity and comparability,
except for the iterative methods \*(L"\fIsolve_GSM()\fR\*(R", \*(L"\fIsolve_SSM()\fR\*(R" and
\&\*(L"\fIsolve_RM()\fR\*(R" which use either norm depending on the matrix itself.
.RE
.IP "\(bu"
\&\f(CW\*(C`$norm_sum = $matrix\->norm_sum();\*(C'\fR
.PP
This is a very simple norm which is defined as the sum of the
absolute values of every element.
.RE
.IP "\(bu"
\&\f(CW\*(C`$p_norm = $matrix\->norm_p($n);\*(C'\fR
.PP
This function returns the \*(L"p\-norm\*(R" of a vector. The argument \f(CW$n\fR
must be a number greater than or equal to 1 or the string \*(L"Inf\*(R".
The p\-norm is defined as (sum(x_i^p))^(1/p). In words, it raises
each element to the p\-th power, adds them up, and then takes the
p\-th root of that number. If the string \*(L"Inf\*(R" is passed, the
\&\*(L"infinity\-norm\*(R" is computed, which is really the limit of the
p\-norm as p goes to infinity. It is defined as the largest element
of the vector in absolute value. Also, note that the familiar Euclidean
distance between two vectors is just a special case of a p\-norm, when p is
equal to 2.
.PP
Example:
.PP
.Vb 5
\& $a = Math::MatrixReal->new_from_cols([[1,2,3]]);
\& $p1 = $a->norm_p(1);
\& $p2 = $a->norm_p(2);
\& $p3 = $a->norm_p(3);
\& $pinf = $a->norm_p("Inf");
.Ve
.PP
.Vb 1
\& print "(1,2,3,Inf) norm:\en$p1\en$p2\en$p3\en$pinf\en";
.Ve
.PP
.Vb 2
\& $i1 = $a->new_from_rows([[1,0]]);
\& $i2 = $a->new_from_rows([[0,1]]);
.Ve
.PP
.Vb 2
\& # this should be sqrt(2) since it is the same as the
\& # hypotenuse of a 1 by 1 right triangle
.Ve
.PP
.Vb 2
\& $dist = ($i1-$i2)->norm_p(2);
\& print "Distance is $dist, which should be " . sqrt(2) . "\en";
.Ve
.PP
Output:
.PP
.Vb 5
\& (1,2,3,Inf) norm:
\& 6
\& 3.74165738677394139
\& 3.30192724889462668
\& 3
.Ve
.PP
.Vb 1
\& Distance is 1.41421356237309505, which should be 1.41421356237309505
.Ve
.RE
.IP "\(bu"
\&\f(CW\*(C`$frob_norm = $matrix\->norm_frobenius();\*(C'\fR
.PP
This norm is similar to the p\-norm with p equal to 2, except that it
acts on a \fBmatrix\fR, not a vector. Each element of the matrix is
squared, the results are summed, and the square root of that sum is taken.
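.PP
Continuing with the 2 by 2 matrix from the norm example above:
.PP
.Vb 1
\& $frob = $m->norm_frobenius();   # sqrt(1+4+9+16) = sqrt(30), about 5.477
.Ve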
.RE
.IP "\(bu"
\&\f(CW\*(C`$matrix\->spectral_radius();\*(C'\fR
.PP
Returns the maximum value of the absolute value of all eigenvalues.
Currently this computes \fBall\fR eigenvalues, then sifts through them
to find the largest in absolute value. Needless to say, this is very
inefficient, and in the future an algorithm that computes only the
largest eigenvalue may be implemented.
.RE
.IP "\(bu"
\&\f(CW\*(C`$matrix1\->transpose($matrix2);\*(C'\fR
.PP
Calculates the transposed matrix of matrix \f(CW$matrix2\fR and stores
the result in matrix "\f(CW$matrix1\fR\*(L" (which must already exist and have
the same size as matrix \*(R"\f(CW$matrix2\fR"!).
.PP
This operation can also be carried out \*(L"in\-place\*(R", i.e., input and
output matrix may be identical.
.PP
Transposition is a symmetry operation: imagine you rotate the matrix
along the axis of its main diagonal (going through elements (1,1),
(2,2), (3,3) and so on) by 180 degrees.
.PP
Another way of looking at it is to say that rows and columns are
swapped. In fact the contents of element \f(CW\*(C`(i,j)\*(C'\fR are swapped
with those of element \f(CW\*(C`(j,i)\*(C'\fR.
.PP
Note that (especially for vectors) it makes a big difference if you
have a row vector, like this:
.PP
.Vb 1
\& [ -1 0 1 ]
.Ve
.PP
or a column vector, like this:
.PP
.Vb 3
\& [ -1 ]
\& [ 0 ]
\& [ 1 ]
.Ve
.PP
the one vector being the transpose of the other!
.PP
This is especially true for the matrix product of two vectors:
.PP
.Vb 3
\& [ -1 ]
\& [ -1 0 1 ] * [ 0 ] = [ 2 ] , whereas
\& [ 1 ]
.Ve
.PP
.Vb 3
\& [ -1 ]                  [  1  0 -1 ]
\& [  0 ] * [ -1 0 1 ]  =  [  0  0  0 ]
\& [  1 ]                  [ -1  0  1 ]
.Ve
.PP
So be careful about what you really mean!
.PP
Hint: throughout this module, whenever a vector is explicitly required
for input, a \fB\s-1COLUMN\s0\fR vector is expected!
.RE
.IP "\(bu"
\&\f(CW\*(C`$trace = $matrix\->trace();\*(C'\fR
.PP
This returns the trace of the matrix, which is defined as
the sum of the diagonal elements. The matrix must be
quadratic.
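.PP
Reusing the \f(CW\*(C`new_diag\*(C'\fR example from above, for instance:
.PP
.Vb 2
\& $matrix = Math::MatrixReal->new_diag( [ 1,2,3,4 ] );
\& $trace = $matrix->trace();   # 1+2+3+4 = 10
.Ve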
.RE
.IP "\(bu"
\&\f(CW\*(C`$minor = $matrix\->minor($row,$col);\*(C'\fR
.PP
Returns the minor matrix corresponding to \f(CW$row\fR and \f(CW$col\fR. \f(CW$matrix\fR must be quadratic.
If \f(CW$matrix\fR is n rows by n cols, the minor of \f(CW$row\fR and \f(CW$col\fR will be an (n\-1) by (n\-1)
matrix. The minor is obtained by crossing out the specified row and column and returning
the remaining rows and columns as a matrix. This method is used by \f(CW\*(C`cofactor()\*(C'\fR.
.RE
.IP "\(bu"
\&\f(CW\*(C`$cofactor = $matrix\->cofactor();\*(C'\fR
.PP
The cofactor matrix is constructed as follows:
.PP
For each element, cross out the row and column that it sits in.
Now, take the determinant of the matrix that is left in the other
rows and columns.
Multiply the determinant by (\-1)^(i+j), where i is the row index,
and j is the column index.
Replace the given element with this value.
.PP
The cofactor matrix can be used to find the inverse of the matrix. One formula for the
inverse of a matrix is the cofactor matrix transposed divided by the original
determinant of the matrix.
.PP
The following two inverses should be exactly the same:
.PP
.Vb 2
\& my $inverse1 = $matrix->inverse;
\& my $inverse2 = ~($matrix->cofactor)->each( sub { (shift)/$matrix->det() } );
.Ve
.PP
Caveat: Although the cofactor matrix provides a simple algorithm for computing the inverse
of a matrix, and can be used with pencil and paper for small matrices, it is comically slower than
the native \f(CW\*(C`inverse()\*(C'\fR function. Here is a small benchmark:
.PP
.Vb 6
\& # $matrix1 is 15x15
\& $det = $matrix1->det;
\& timethese( 10,
\& {'inverse' => sub { $matrix1->inverse(); },
\& 'cofactor' => sub { (~$matrix1->cofactor)->each ( sub { (shift)/$det; } ) }
\& } );
.Ve
.PP
.Vb 3
\& Benchmark: timing 10 iterations of LR, cofactor, inverse...
\& inverse: 1 wallclock secs ( 0.56 usr + 0.00 sys = 0.56 CPU) @ 17.86/s (n=10)
\& cofactor: 36 wallclock secs (36.62 usr + 0.01 sys = 36.63 CPU) @ 0.27/s (n=10)
.Ve
.RE
.IP "\(bu"
\&\f(CW\*(C`$adjoint = $matrix\->adjoint();\*(C'\fR
.PP
The adjoint is just the transpose of the cofactor matrix. This method is
just an alias for \f(CW\*(C` ~($matrix\->cofactor)\*(C'\fR.
.Sh "Arithmetic Operations"
.IX Subsection "Arithmetic Operations"
.RE
.IP "\(bu"
\&\f(CW\*(C`$matrix1\->add($matrix2,$matrix3);\*(C'\fR
.PP
Calculates the sum of matrix "\f(CW$matrix2\fR\*(L" and matrix \*(R"\f(CW$matrix3\fR\*(L"
and stores the result in matrix \*(R"\f(CW$matrix1\fR\*(L" (which must already exist
and have the same size as matrix \*(R"\f(CW$matrix2\fR\*(L" and matrix \*(R"\f(CW$matrix3\fR"!).
.PP
This operation can also be carried out \*(L"in\-place\*(R", i.e., the output and
one (or both) of the input matrices may be identical.
.RE
.IP "\(bu"
\&\f(CW\*(C`$matrix1\->subtract($matrix2,$matrix3);\*(C'\fR
.PP
Calculates the difference of matrix "\f(CW$matrix2\fR\*(L" minus matrix \*(R"\f(CW$matrix3\fR\*(L"
and stores the result in matrix \*(R"\f(CW$matrix1\fR\*(L" (which must already exist
and have the same size as matrix \*(R"\f(CW$matrix2\fR\*(L" and matrix \*(R"\f(CW$matrix3\fR"!).
.PP
This operation can also be carried out \*(L"in\-place\*(R", i.e., the output and
one (or both) of the input matrices may be identical.
.PP
Note that this operation is the same as
\&\f(CW\*(C`$matrix1\->add($matrix2,\-$matrix3);\*(C'\fR, although the latter is
a little less efficient.
.RE
.IP "\(bu"
\&\f(CW\*(C`$matrix1\->multiply_scalar($matrix2,$scalar);\*(C'\fR
.PP
Calculates the product of matrix "\f(CW$matrix2\fR\*(L" and the number \*(R"\f(CW$scalar\fR\*(L"
(i.e., multiplies each element of matrix \*(R"\f(CW$matrix2\fR\*(L" with the factor
\&\*(R"\f(CW$scalar\fR\*(L") and stores the result in matrix \*(R"\f(CW$matrix1\fR\*(L" (which must
already exist and have the same size as matrix \*(R"\f(CW$matrix2\fR"!).
.PP
This operation can also be carried out \*(L"in\-place\*(R", i.e., input and
output matrix may be identical.
.RE
.IP "\(bu"
\&\f(CW\*(C`$product_matrix = $matrix1\->multiply($matrix2);\*(C'\fR
.PP
Calculates the product of matrix "\f(CW$matrix1\fR\*(L" and matrix \*(R"\f(CW$matrix2\fR\*(L"
and returns an object reference to a new matrix \*(R"\f(CW$product_matrix\fR" in
which the result of this operation has been stored.
.PP
Note that the dimensions of the two matrices "\f(CW$matrix1\fR\*(L" and \*(R"\f(CW$matrix2\fR"
(i.e., their numbers of rows and columns) must harmonize in the following
way (example):
.PP
.Vb 3
\& [ 2 2 ]
\& [ 2 2 ]
\& [ 2 2 ]
.Ve
.PP
.Vb 4
\& [ 1 1 1 ] [ * * ]
\& [ 1 1 1 ] [ * * ]
\& [ 1 1 1 ] [ * * ]
\& [ 1 1 1 ] [ * * ]
.Ve
.PP
I.e., the number of columns of matrix "\f(CW$matrix1\fR\*(L" has to be the same
as the number of rows of matrix \*(R"\f(CW$matrix2\fR".
.PP
The number of rows and columns of the resulting matrix "\f(CW$product_matrix\fR\*(L"
is determined by the number of rows of matrix \*(R"\f(CW$matrix1\fR\*(L" and the number
of columns of matrix \*(R"\f(CW$matrix2\fR", respectively.
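.PP
In code, the example above reads (a sketch; the overloaded \*(L"*\*(R" operator
works the same way):
.PP
.Vb 3
\& $matrix1 = Math::MatrixReal->new(4,3);            # 4 by 3
\& $matrix2 = Math::MatrixReal->new(3,2);            # 3 by 2
\& $product_matrix = $matrix1->multiply($matrix2);   # result is 4 by 2
.Ve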
.RE
.IP "\(bu"
\&\f(CW\*(C`$matrix1\->negate($matrix2);\*(C'\fR
.PP
Calculates the negative of matrix "\f(CW$matrix2\fR\*(L" (i.e., multiplies
all elements with \*(R"\-1\*(L") and stores the result in matrix \*(R"\f(CW$matrix1\fR\*(L"
(which must already exist and have the same size as matrix \*(R"\f(CW$matrix2\fR"!).
.PP
This operation can also be carried out \*(L"in\-place\*(R", i.e., input and
output matrix may be identical.
.RE
.IP "\(bu"
\&\f(CW\*(C`$matrix_to_power = $matrix1\->exponent($integer);\*(C'\fR
.PP
Raises the matrix to the \f(CW$integer\fR power. Obviously, \f(CW$integer\fR must
be an integer. If it is zero, the identity matrix is returned. If a negative
integer is given, the inverse will be computed (if it exists) and then raised
to the absolute value of \f(CW$integer\fR. The matrix must be quadratic.
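.PP
For example (assuming \f(CW$m\fR is quadratic and invertible):
.PP
.Vb 3
\& $square = $m->exponent(2);      # same as $m * $m
\& $identity = $m->exponent(0);    # identity matrix of the same size
\& $inv_cubed = $m->exponent(-3);  # inverse of $m, raised to the 3rd power
.Ve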
.Sh "Boolean Matrix Operations"
.IX Subsection "Boolean Matrix Operations"
.RE
.IP "\(bu"
\&\f(CW\*(C`$matrix\->is_quadratic();\*(C'\fR
.PP
Returns a boolean value indicating if the given matrix is
quadratic (also known as \*(L"square\*(R" or \*(L"n by n\*(R"). A matrix is
quadratic if it has the same number of rows as it does columns.
.RE
.IP "\(bu"
\&\f(CW\*(C`$matrix\->is_square();\*(C'\fR
.PP
This is an alias for \f(CW\*(C`is_quadratic()\*(C'\fR.
.RE
.IP "\(bu"
\&\f(CW\*(C`$matrix\->is_symmetric();\*(C'\fR
.PP
Returns a boolean value indicating if the given matrix is
symmetric. By definition, a matrix is symmetric if and only
if (\fBM\fR[\fIi\fR,\fIj\fR]=\fBM\fR[\fIj\fR,\fIi\fR]). This is equivalent to
\&\f(CW\*(C`($matrix == ~$matrix)\*(C'\fR but without memory allocation.
Only quadratic matrices can be symmetric.
.PP
Notes: A symmetric matrix always has real eigenvalues/eigenvectors.
A matrix plus its transpose is always symmetric.
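.PP
The second note gives a cheap way to symmetrize a quadratic matrix, e.g.:
.PP
.Vb 2
\& $sym = $matrix + ~$matrix;              # always symmetric
\& print "ok\en" if $sym->is_symmetric();   # always true
.Ve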
.RE
.IP "\(bu"
\&\f(CW\*(C`$matrix\->is_skew_symmetric();\*(C'\fR
.PP
Returns a boolean value indicating if the given matrix is
skew symmetric. By definition, a matrix is skew symmetric if and only
if (\fBM\fR[\fIi\fR,\fIj\fR]=\fB\-M\fR[\fIj\fR,\fIi\fR]). This is equivalent to
\&\f(CW\*(C`($matrix == \-(~$matrix))\*(C'\fR but without memory allocation.
Only quadratic matrices can be skew symmetric.
.RE
.IP "\(bu"
\&\f(CW\*(C`$matrix\->is_diagonal();\*(C'\fR
.PP
Returns a boolean value indicating if the given matrix is
diagonal, i.e. all of the nonzero elements are on the main diagonal.
Only quadratic matrices can be diagonal.
.RE
.IP "\(bu"
\&\f(CW\*(C`$matrix\->is_tridiagonal();\*(C'\fR
.PP
Returns a boolean value indicating if the given matrix is
tridiagonal, i.e. all of the nonzero elements are on the main diagonal
or the diagonals above and below the main diagonal.
Only quadratic matrices can be tridiagonal.
.RE
.IP "\(bu"
\&\f(CW\*(C`$matrix\->is_upper_triangular();\*(C'\fR
.PP
Returns a boolean value indicating if the given matrix is upper triangular,
i.e. all of the nonzero elements not on the main diagonal are above it.
Only quadratic matrices can be upper triangular.
Note: diagonal matrices are both upper and lower triangular.
.RE
.IP "\(bu"
\&\f(CW\*(C`$matrix\->is_lower_triangular();\*(C'\fR
.PP
Returns a boolean value indicating if the given matrix is lower triangular,
i.e. all of the nonzero elements not on the main diagonal are below it.
Only quadratic matrices can be lower triangular.
Note: diagonal matrices are both upper and lower triangular.
.RE
.IP "\(bu"
\&\f(CW\*(C`$matrix\->is_orthogonal();\*(C'\fR
.PP
Returns a boolean value indicating if the given matrix is orthogonal.
An orthogonal matrix has the property that the transpose equals the
inverse of the matrix. Instead of computing each and comparing them, this
method multiplies the matrix by its transpose, and returns true if this
turns out to be the identity matrix, false otherwise.
Only quadratic matrices can be orthogonal.
.RE
.IP "\(bu"
\&\f(CW\*(C`$matrix\->is_binary();\*(C'\fR
.PP
Returns a boolean value indicating if the given matrix is binary.
A matrix is binary if it contains only zeroes or ones.
.RE
.IP "\(bu"
\&\f(CW\*(C`$matrix\->is_gramian();\*(C'\fR
.PP
Returns a boolean value indicating if the given matrix is Gramian.
A matrix \f(CW$A\fR is Gramian if and only if there exists a
square matrix \f(CW$B\fR such that \f(CW\*(C`$A = ~$B*$B\*(C'\fR. This is equivalent to
checking if \f(CW$A\fR is symmetric and has all nonnegative eigenvalues, which
is what Math::MatrixReal uses to check for this property.
.RE
.IP "\(bu"
\&\f(CW\*(C`$matrix\->is_LR();\*(C'\fR
.PP
Returns a boolean value indicating if the matrix is an \s-1LR\s0 decomposition
matrix.
.RE
.IP "\(bu"
\&\f(CW\*(C`$matrix\->is_positive();\*(C'\fR
.PP
Returns a boolean value indicating if the matrix contains only
positive entries. Note that a zero entry is not positive and
will cause \f(CW\*(C`is_positive()\*(C'\fR to return false.
.RE
.IP "\(bu"
\&\f(CW\*(C`$matrix\->is_negative();\*(C'\fR
.PP
Returns a boolean value indicating if the matrix contains only
negative entries. Note that a zero entry is not negative and
will cause \f(CW\*(C`is_negative()\*(C'\fR to return false.
.RE
.IP "\(bu"
\&\f(CW\*(C`$matrix\->is_periodic($k);\*(C'\fR
.PP
Returns a boolean value indicating if the matrix is periodic
with period \f(CW$k\fR. This is true if \f(CW\*(C`$matrix ** ($k+1) == $matrix\*(C'\fR.
When \f(CW\*(C`$k == 1\*(C'\fR, this reduces down to the \f(CW\*(C`is_idempotent()\*(C'\fR
function.
.RE
.IP "\(bu"
\&\f(CW\*(C`$matrix\->is_idempotent();\*(C'\fR
.PP
Returns a boolean value indicating if the matrix is idempotent,
which is defined as the square of the matrix being equal to
the original matrix, i.e \f(CW\*(C`$matrix ** 2 == $matrix\*(C'\fR.
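.PP
The identity matrix is the simplest idempotent example (using the \fIone()\fR
method described further below):
.PP
.Vb 3
\& $m = Math::MatrixReal->new(3,3);
\& $m->one();                                    # turn $m into the identity
\& print "idempotent\en" if $m->is_idempotent(); # true: I ** 2 == I
.Ve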
.RE
.IP "\(bu"
\&\f(CW\*(C`$matrix\->is_row_vector();\*(C'\fR
.PP
Returns a boolean value indicating if the matrix is a row vector.
A row vector is a matrix which is 1xn. Note that the 1x1 matrix is
both a row and column vector.
.RE
.IP "\(bu"
\&\f(CW\*(C`$matrix\->is_col_vector();\*(C'\fR
.PP
Returns a boolean value indicating if the matrix is a col vector.
A col vector is a matrix which is nx1. Note that the 1x1 matrix is
both a row and column vector.
.Sh "Eigensystems"
.IX Subsection "Eigensystems"
.IP "\(bu" 2
\&\f(CW\*(C`($l, $V) = $matrix\->sym_diagonalize();\*(C'\fR
.Sp
This method performs the diagonalization of the quadratic
\&\fIsymmetric\fR matrix \fBM\fR stored in \f(CW$matrix\fR.
On output, \fBl\fR is a column vector containing all the eigenvalues
of \fBM\fR and \fBV\fR is an orthogonal matrix whose columns are the
corresponding normalized eigenvectors.
The primary property of an eigenvalue \fIl\fR and an eigenvector
\&\fBx\fR is of course that: \fBM\fR * \fBx\fR = \fIl\fR * \fBx\fR.
.Sp
The method uses a Householder reduction to tridiagonal form
followed by a \s-1QL\s0 algorithm with implicit shifts on this
tridiagonal. (The tridiagonal matrix is kept internally
in a compact form in this routine to save memory.)
In fact, this routine wraps the \fIhouseholder()\fR and
\&\fItri_diagonalize()\fR methods described below when their
intermediate results are not desired.
The overall algorithmic complexity of this technique
is O(N^3). According to several books, the coefficient
hidden by the 'O' is one of the best possible for general
(symmetric) matrices.
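.Sp
A hedged usage sketch (assuming \f(CW$matrix\fR is symmetric) that checks the
eigenvalue property for the first eigenpair:
.Sp
.Vb 3
\& ($l, $V) = $matrix->sym_diagonalize();
\& $residual = $matrix * $V->column(1) - $l->element(1,1) * $V->column(1);
\& print $residual->norm_one(), "\en";   # should be close to zero
.Ve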
.IP "\(bu" 2
\&\f(CW\*(C`($T, $Q) = $matrix\->householder();\*(C'\fR
.Sp
This method performs the Householder algorithm which reduces
the \fIn\fR by \fIn\fR real \fIsymmetric\fR matrix \fBM\fR contained
in \f(CW$matrix\fR to tridiagonal form.
On output, \fBT\fR is a symmetric tridiagonal matrix (only
diagonal and off-diagonal elements are non\-zero) and \fBQ\fR
is an \fIorthogonal\fR matrix performing the transformation
between \fBM\fR and \fBT\fR (\f(CW\*(C`$M == $Q * $T * ~$Q\*(C'\fR).
.IP "\(bu" 2
\&\f(CW\*(C`($l, $V) = $T\->tri_diagonalize([$Q]);\*(C'\fR
.Sp
This method diagonalizes the symmetric tridiagonal
matrix \fBT\fR. On output, \f(CW$l\fR and \f(CW$V\fR are similar to the
output values described for \fIsym_diagonalize()\fR.
.Sp
The optional argument \f(CW$Q\fR corresponds to an orthogonal
transformation matrix \fBQ\fR that should be used additionally
during \fBV\fR (eigenvectors) computation. It should be supplied
if the desired eigenvectors correspond to a more general
symmetric matrix \fBM\fR previously reduced by the
\&\fIhouseholder()\fR method, not a mere tridiagonal. If \fBT\fR is
really a tridiagonal matrix, \fBQ\fR can be omitted (it
will be internally created in fact as an identity matrix).
The method uses a \s-1QL\s0 algorithm (with implicit shifts).
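.Sp
Spelled out, the two-phase pipeline that \f(CW\*(C`sym_diagonalize()\*(C'\fR wraps is:
.Sp
.Vb 2
\& ($T, $Q) = $matrix->householder();     # phase 1: reduce to tridiagonal
\& ($l, $V) = $T->tri_diagonalize($Q);    # phase 2: QL with implicit shifts
.Ve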
.IP "\(bu" 2
\&\f(CW\*(C`$l = $matrix\->sym_eigenvalues();\*(C'\fR
.Sp
This method computes the eigenvalues of the quadratic
\&\fIsymmetric\fR matrix \fBM\fR stored in \f(CW$matrix\fR.
On output, \fBl\fR is a column vector containing all the eigenvalues
of \fBM\fR. Eigenvectors are not computed (in contrast to
\&\f(CW\*(C`sym_diagonalize()\*(C'\fR) and this method is more efficient
(even though it uses a similar algorithm with two phases).
However, understand that the algorithmic complexity of this
technique is still also O(N^3). But the coefficient hidden
by the 'O' is better by a factor of..., well, see your
benchmark, it's wiser.
.Sp
This routine wraps the \fIhouseholder_tridiagonal()\fR and
\&\fItri_eigenvalues()\fR methods described below when the
intermediate tridiagonal matrix is not needed.
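.Sp
Again spelled out as its two wrapped phases:
.Sp
.Vb 2
\& $T = $matrix->householder_tridiagonal();   # phase 1: no $Q computed
\& $l = $T->tri_eigenvalues();                # phase 2: eigenvalues only
.Ve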
.IP "\(bu" 2
\&\f(CW\*(C`$T = $matrix\->householder_tridiagonal();\*(C'\fR
.Sp
This method performs the Householder algorithm which reduces
the \fIn\fR by \fIn\fR real \fIsymmetric\fR matrix \fBM\fR contained
in \f(CW$matrix\fR to tridiagonal form.
On output, \fBT\fR is the obtained symmetric tridiagonal matrix
(only diagonal and off-diagonal elements are non\-zero). The
operation is similar to the \fIhouseholder()\fR method, but potentially
a little more efficient as the transformation matrix is not
computed.
.IP "\(bu" 2
\&\f(CW\*(C`$l = $T\->tri_eigenvalues();\*(C'\fR
.Sp
This method computes the eigenvalues of the symmetric
tridiagonal matrix \fBT\fR. On output, \f(CW$l\fR is a vector
containing the eigenvalues (similar to \f(CW\*(C`sym_eigenvalues()\*(C'\fR).
This method is much more efficient than \fItri_diagonalize()\fR
when eigenvectors are not needed.
.Sh "Miscellaneous"
.IX Subsection "Miscellaneous"
.RE
.IP "\(bu"
\&\f(CW\*(C`$matrix\->zero();\*(C'\fR
.PP
Assigns a zero to every element of the matrix "\f(CW$matrix\fR\*(L", i.e.,
erases all values previously stored there, thereby effectively
transforming the matrix into a \*(R"zero\*(L"\-matrix or \*(R"null"\-matrix,
the neutral element of the addition operation in a Ring.
.PP
(For instance the (quadratic) matrices with \*(L"n\*(R" rows and columns
and matrix addition and multiplication form a Ring. The most prominent
characteristic of a Ring is that multiplication is, in general, not
commutative, i.e., "\f(CW\*(C`matrix1 * matrix2\*(C'\fR\*(L" is usually not the same as
\&\*(R"\f(CW\*(C`matrix2 * matrix1\*(C'\fR"!)
.RE
.IP "\(bu"
\&\f(CW\*(C`$matrix\->one();\*(C'\fR
.PP
Assigns one's to the elements on the main diagonal (elements (1,1),
(2,2), (3,3) and so on) of matrix "\f(CW$matrix\fR\*(L" and zero's to all others,
thereby erasing all values previously stored there and transforming the
matrix into a \*(R"one"\-matrix, the neutral element of the multiplication
operation in a Ring.
.PP
(If the matrix is quadratic (which this method doesn't require, though),
then multiplying this matrix with itself yields this same matrix again,
and multiplying it with some other matrix leaves that other matrix
unchanged!)
.RE
.IP "\(bu"
\&\f(CW\*(C`$latex_string = $matrix\->as_latex( align=> "c", format => "%s", name => "" );\*(C'\fR
.PP
This function returns the matrix as a LaTeX string. It takes a hash as an
argument which is used to control the style of the output. The hash element \f(CW\*(C`align\*(C'\fR
may be \*(L"c\*(R",\*(L"l\*(R" or \*(L"r\*(R", corresponding to center, left and right, respectively. The
\&\f(CW\*(C`format\*(C'\fR element is a format string that is given to \f(CW\*(C`sprintf\*(C'\fR to control the
style of number format, such as floating point or scientific notation. The \f(CW\*(C`name\*(C'\fR
element can be used so that a LaTeX string of \*(L"$name = \*(R" is prepended to the string.
.PP
Example:
.PP
.Vb 2
\& my $a = Math::MatrixReal->new_from_cols([[ 1.234, 5.678, 9.1011],[1,2,3]] );
\& print $a->as_latex( ( format => "%.2f", align => "l",name => "A" ) );
.Ve
.PP
Output:
.PP
.Vb 7
\& $A = $ $
\& \eleft( \ebegin{array}{ll}
\& 1.23&1.00 \e\e
\& 5.68&2.00 \e\e
\& 9.10&3.00
\& \eend{array} \eright)
\& $
.Ve
.RE
.IP "\(bu"
\&\f(CW\*(C`$yacas_string = $matrix\->as_yacas( format => "%s", name => "", semi => 0 );\*(C'\fR
.PP
This function returns the matrix as a string that can be read by Yacas.
It takes a hash as an argument which controls the style of the output. The
\&\f(CW\*(C`format\*(C'\fR element is a format string that is given to \f(CW\*(C`sprintf\*(C'\fR to control the
style of number format, such as floating point or scientific notation. The \f(CW\*(C`name\*(C'\fR
element can be used so that \*(L"$name = \*(R" is prepended to the string. The \f(CW\*(C`semi\*(C'\fR element can
be set to 1 so that a semicolon is appended (so Yacas does not print out the matrix.)
.PP
Example:
.PP
.Vb 2
\& $a = Math::MatrixReal->new_from_cols([[ 1.234, 5.678, 9.1011],[1,2,3]] );
\& print $a->as_yacas( ( format => "%.2f", align => "l",name => "A" ) );
.Ve
.PP
Output:
.PP
.Vb 1
\& A := {{1.23,1.00},{5.68,2.00},{9.10,3.00}}
.Ve
.RE
.IP "\(bu"
\&\f(CW\*(C`$matlab_string = $matrix\->as_matlab( format => "%s", name => "", semi => 0 );\*(C'\fR
.PP
This function returns the matrix as a string that can be read by Matlab.
It takes a hash as an argument which controls the style of the output. The
\&\f(CW\*(C`format\*(C'\fR element is a format string that is given to \f(CW\*(C`sprintf\*(C'\fR to control the
style of number format, such as floating point or scientific notation. The \f(CW\*(C`name\*(C'\fR
element can be used so that \*(L"$name = \*(R" is prepended to the string. The \f(CW\*(C`semi\*(C'\fR element can
be set to 1 so that a semicolon is appended (so Matlab does not print out the matrix.)
.PP
Example:
.PP
.Vb 2
\& my $a = Math::MatrixReal->new_from_rows([[ 1.234, 5.678, 9.1011],[1,2,3]] );
\& print $a->as_matlab( ( format => "%.3f", name => "A",semi => 1 ) );
.Ve
.PP
Output:
.PP
.Vb 2
\& A = [ 1.234 5.678 9.101;
\& 1.000 2.000 3.000];
.Ve
.RE
.IP "\(bu"
\&\f(CW\*(C`$scilab_string = $matrix\->as_scilab( format => "%s", name => "", semi => 0 );\*(C'\fR
.PP
This function is just an alias for \f(CW\*(C`as_matlab()\*(C'\fR, since both Scilab and Matlab have the
same matrix format.
.RE
.IP "\(bu"
\&\f(CW\*(C`$minimum = Math::MatrixReal::min($number1,$number2);\*(C'\fR
.PP
Returns the minimum of the two numbers "\f(CW\*(C`number1\*(C'\fR\*(L" and \*(R"\f(CW\*(C`number2\*(C'\fR".
.RE
.IP "\(bu"
\&\f(CW\*(C`$maximum = Math::MatrixReal::max($number1,$number2);\*(C'\fR
.PP
Returns the maximum of the two numbers "\f(CW\*(C`number1\*(C'\fR\*(L" and \*(R"\f(CW\*(C`number2\*(C'\fR".
.RE
.IP "\(bu"
\&\f(CW\*(C`$minimal_cost_matrix = $cost_matrix\->kleene();\*(C'\fR
.PP
Copies the matrix "\f(CW$cost_matrix\fR\*(L" (which has to be quadratic!) to
a new matrix of the same size (i.e., \*(R"clones" the input matrix) and
applies Kleene's algorithm to it.
.PP
See \fIMath::Kleene\fR\|(3) for more details about this algorithm!
.PP
The method returns an object reference to the new matrix.
.PP
Matrix "\f(CW$cost_matrix\fR" is not changed by this method in any way.
.RE
.IP "\(bu"
\&\f(CW\*(C`($norm_matrix,$norm_vector) = $matrix\->normalize($vector);\*(C'\fR
.PP
This method is used to improve the numerical stability when solving
linear equation systems.
.PP
Suppose you have a matrix \*(L"A\*(R" and a vector \*(L"b\*(R" and you want to find
out a vector \*(L"x\*(R" so that \f(CW\*(C`A * x = b\*(C'\fR, i.e., the vector \*(L"x\*(R" which
solves the equation system represented by the matrix \*(L"A\*(R" and the
vector \*(L"b\*(R".
.PP
Applying this method to the pair (A,b) yields a pair (A',b') where
each row has been divided by (the absolute value of) the greatest
coefficient appearing in that row. So this coefficient becomes equal
to \*(L"1\*(R" (or \*(L"\-1\*(R") in the new pair (A',b') (all others become smaller
than one and greater than minus one).
.PP
Note that this operation does not change the equation system itself
because the same division is carried out on either side of the equation
sign!
.PP
The method requires a quadratic (!) matrix "\f(CW$matrix\fR\*(L" and a vector
\&\*(R"\f(CW$vector\fR" for input (the vector must be a column vector with the same
number of rows as the input matrix) and returns a list of two items
which are object references to a new matrix and a new vector, in this
order.
.PP
The output matrix and vector are clones of the input matrix and vector
to which the operation explained above has been applied.
.PP
The input matrix and vector are not changed by this in any way.
.PP
Example of how this method can affect the result of the methods to solve
equation systems (explained immediately below following this method):
.PP
Consider the following little program:
.PP
.Vb 1
\& #!perl -w
.Ve
.PP
.Vb 1
\& use Math::MatrixReal qw(new_from_string);
.Ve
.PP
.Vb 5
\& $A = Math::MatrixReal->new_from_string(<<"MATRIX");
\& [ 1 2 3 ]
\& [ 5 7 11 ]
\& [ 23 19 13 ]
\& MATRIX
.Ve
.PP
.Vb 5
\& $b = Math::MatrixReal->new_from_string(<<"MATRIX");
\& [ 0 ]
\& [ 1 ]
\& [ 29 ]
\& MATRIX
.Ve
.PP
.Vb 7
\& $LR = $A->decompose_LR();
\& if (($dim,$x,$B) = $LR->solve_LR($b))
\& {
\& $test = $A * $x;
\& print "x = \en$x";
\& print "A * x = \en$test";
\& }
.Ve
.PP
.Vb 1
\& ($A_,$b_) = $A->normalize($b);
.Ve
.PP
.Vb 7
\& $LR = $A_->decompose_LR();
\& if (($dim,$x,$B) = $LR->solve_LR($b_))
\& {
\& $test = $A * $x;
\& print "x = \en$x";
\& print "A * x = \en$test";
\& }
.Ve
.PP
This will print:
.PP
.Vb 16
\& x =
\& [ 1.000000000000E+00 ]
\& [ 1.000000000000E+00 ]
\& [ -1.000000000000E+00 ]
\& A * x =
\& [ 4.440892098501E-16 ]
\& [ 1.000000000000E+00 ]
\& [ 2.900000000000E+01 ]
\& x =
\& [ 1.000000000000E+00 ]
\& [ 1.000000000000E+00 ]
\& [ -1.000000000000E+00 ]
\& A * x =
\& [ 0.000000000000E+00 ]
\& [ 1.000000000000E+00 ]
\& [ 2.900000000000E+01 ]
.Ve
.PP
You can see that in the second example (where \*(L"\fInormalize()\fR\*(R" has been used),
the result is \*(L"better\*(R", i.e., more accurate!
.RE
.IP "\(bu"
\&\f(CW\*(C`$LR_matrix = $matrix\->decompose_LR();\*(C'\fR
.PP
This method is needed to solve linear equation systems.
.PP
Suppose you have a matrix \*(L"A\*(R" and a vector \*(L"b\*(R" and you want to find
out a vector \*(L"x\*(R" so that \f(CW\*(C`A * x = b\*(C'\fR, i.e., the vector \*(L"x\*(R" which
solves the equation system represented by the matrix \*(L"A\*(R" and the
vector \*(L"b\*(R".
.PP
You might also have a matrix \*(L"A\*(R" and a whole bunch of different
vectors \*(L"b1\*(R"..\*(L"bk\*(R" for which you need to find vectors \*(L"x1\*(R"..\*(L"xk\*(R"
so that \f(CW\*(C`A * xi = bi\*(C'\fR, for \f(CW\*(C`i=1..k\*(C'\fR.
.PP
Using Gaussian transformations (multiplying a row or column with
a factor, swapping two rows or two columns and adding a multiple
of one row or column to another), it is possible to decompose any
matrix \*(L"A\*(R" into two triangular matrices, called \*(L"L\*(R" and \*(L"R\*(R" (for
\&\*(L"Left\*(R" and \*(L"Right\*(R").
.PP
1368\&\*(L"L\*(R" has one's on the main diagonal (the elements (1,1), (2,2), (3,3)
1369and so so), non-zero values to the left and below of the main diagonal
1370and all zero's in the upper right half of the matrix.
.PP
\&\*(L"R\*(R" has non-zero values on the main diagonal as well as to the right
and above of the main diagonal and all zero's in the lower left half
of the matrix, as follows:
.PP
.Vb 5
\& [ 1 0 0 0 0 ] [ x x x x x ]
\& [ x 1 0 0 0 ] [ 0 x x x x ]
\& L = [ x x 1 0 0 ] R = [ 0 0 x x x ]
\& [ x x x 1 0 ] [ 0 0 0 x x ]
\& [ x x x x 1 ] [ 0 0 0 0 x ]
.Ve
.PP
Note that "\f(CW\*(C`L * R\*(C'\fR\*(L" is equivalent to matrix \*(R"A" in the sense that
\&\f(CW\*(C`L * R * x = b <==> A * x = b\*(C'\fR for all vectors \*(L"x\*(R", leaving
out of account permutations of the rows and columns (these are taken
care of \*(L"magically\*(R" by this module!) and numerical errors.
.PP
Trick:
.PP
Because we know that \*(L"L\*(R" has one's on its main diagonal, we can
store both matrices together in the same array without information
loss! I.e.,
.PP
.Vb 5
\& [ R R R R R ]
\& [ L R R R R ]
\& LR = [ L L R R R ]
\& [ L L L R R ]
\& [ L L L L R ]
.Ve
.PP
Beware, though, that \*(L"\s-1LR\s0\*(R" and "\f(CW\*(C`L * R\*(C'\fR" are not the same!!!
.PP
Note also that for the same reason, you cannot apply the method \*(L"\fInormalize()\fR\*(R"
to an \*(L"\s-1LR\s0\*(R" decomposition matrix. Trying to do so will yield meaningless
rubbish!
.PP
(You need to apply \*(L"\fInormalize()\fR\*(R" to each pair (Ai,bi) \fB\s-1BEFORE\s0\fR decomposing
the matrix \*(L"Ai'\*(R"!)
.PP
Now how does all this help us in solving linear equation systems?
.PP
It helps us because a triangular matrix is the next best thing
that can happen to us besides a diagonal matrix (a matrix that
has non-zero values only on its main diagonal \- in which case
the solution is trivial, simply divide "\f(CW\*(C`b[i]\*(C'\fR\*(L" by \*(R"\f(CW\*(C`A[i,i]\*(C'\fR\*(L"
to get \*(R"\f(CW\*(C`x[i]\*(C'\fR"!).
.PP
To find the solution to our problem "\f(CW\*(C`A * x = b\*(C'\fR", we divide this
problem in parts: instead of solving \f(CW\*(C`A * x = b\*(C'\fR directly, we first
decompose \*(L"A\*(R" into \*(L"L\*(R" and \*(L"R\*(R" and then solve "\f(CW\*(C`L * y = b\*(C'\fR\*(L" and
finally \*(R"\f(CW\*(C`R * x = y\*(C'\fR" (motto: divide and rule!).
.PP
From the illustration above it is clear that solving "\f(CW\*(C`L * y = b\*(C'\fR\*(L"
and \*(R"\f(CW\*(C`R * x = y\*(C'\fR" is straightforward: we immediately know that
\&\f(CW\*(C`y[1] = b[1]\*(C'\fR. We then deduce swiftly that
.PP
.Vb 1
\& y[2] = b[2] - L[2,1] * y[1]
.Ve
.PP
(and we know "\f(CW\*(C`y[1]\*(C'\fR" by now!), that
.PP
.Vb 1
\& y[3] = b[3] - L[3,1] * y[1] - L[3,2] * y[2]
.Ve
.PP
and so on.
.PP
Having effortlessly calculated the vector \*(L"y\*(R", we now proceed to
calculate the vector \*(L"x\*(R" in a similar fashion: we see immediately
that \f(CW\*(C`x[n] = y[n] / R[n,n]\*(C'\fR. It follows that
.PP
.Vb 1
\& x[n-1] = ( y[n-1] - R[n-1,n] * x[n] ) / R[n-1,n-1]
.Ve
.PP
and
.PP
.Vb 2
\& x[n-2] = ( y[n-2] - R[n-2,n-1] * x[n-1] - R[n-2,n] * x[n] )
\& / R[n-2,n-2]
.Ve
.PP
and so on.
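.PP
In a Perl-ish sketch, using plain arrays with 1\-based indices (illustrative
only, not the module's internal code), the two substitution passes are:
.PP
.Vb 11
\& # forward substitution: solve L * y = b (L has 1's on its diagonal)
\& for my $i (1 .. $n) {
\&     $y[$i] = $b[$i];
\&     for my $j (1 .. $i-1) { $y[$i] -= $L[$i][$j] * $y[$j]; }
\& }
\& # backward substitution: solve R * x = y
\& for my $i (reverse 1 .. $n) {
\&     $x[$i] = $y[$i];
\&     for my $j ($i+1 .. $n) { $x[$i] -= $R[$i][$j] * $x[$j]; }
\&     $x[$i] /= $R[$i][$i];
\& }
.Ve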
.PP
You can see that \- especially when you have many vectors \*(L"b1\*(R"..\*(L"bk\*(R"
for which you are searching solutions to \f(CW\*(C`A * xi = bi\*(C'\fR \- this scheme
is much more efficient than a straightforward, \*(L"brute force\*(R" approach.
.PP
This method requires a quadratic matrix as its input matrix.
.PP
If you don't have that many equations, fill up with zeros (i.e., do
nothing to the superfluous rows if it's a \*(L"fresh\*(R" matrix, that is,
a matrix that has been created with \*(L"\fInew()\fR\*(R" or \*(L"\fIshadow()\fR\*(R").
.PP
The method returns an object reference to a new matrix containing the
matrices \*(L"L\*(R" and \*(L"R\*(R".
.PP
The input matrix is not changed by this method in any way.
.PP
Note that you can \*(L"\fIcopy()\fR\*(R" or \*(L"\fIclone()\fR\*(R" the result of this method without
losing its \*(L"magical\*(R" properties (for instance concerning the hidden
permutations of its rows and columns).
.PP
However, as soon as you apply any method that alters the contents
of the matrix, its \*(L"magical\*(R" properties are stripped off, and the matrix
immediately reverts to an \*(L"ordinary\*(R" matrix (with the values it just happens
to contain at that moment, be they meaningful as an ordinary matrix or not!).
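.PP
A typical call, sketched (the variable names "\f(CW$A_matrix\fR" and
"\f(CW$LR_matrix\fR" are illustrative only):
.PP
.Vb 3
\& $LR_matrix = $A_matrix->decompose_LR();
\& # $A_matrix is left untouched; $LR_matrix holds "L" and "R" (plus
\& # the hidden row/column permutations) in a single matrix
.Ve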
.RE
.IP "\(bu"
\&\f(CW\*(C`($dimension,$x_vector,$base_matrix) = $LR_matrix\*(C'\fR\f(CW\*(C`\->\*(C'\fR\f(CW\*(C`solve_LR($b_vector);\*(C'\fR
.PP
Use this method to actually solve an equation system.
.PP
Matrix "\f(CW$LR_matrix\fR\*(L" must be a (quadratic) matrix returned by the
method \*(R"\fIdecompose_LR()\fR\*(L", the \s-1LR\s0 decomposition matrix of the matrix
\&\*(R"A" of your equation system \f(CW\*(C`A * x = b\*(C'\fR.
.PP
The input vector "\f(CW$b_vector\fR\*(L" is the vector \*(R"b" in your equation system
\&\f(CW\*(C`A * x = b\*(C'\fR, which must be a column vector and have the same number of
rows as the input matrix "\f(CW$LR_matrix\fR".
.PP
The method returns a list of three items if a solution exists, or an
empty list otherwise (!).
.PP
Therefore, you should always use this method like this:
.PP
.Vb 8
\& if ( ($dim,$x_vec,$base) = $LR->solve_LR($b_vec) )
\& {
\&     # do something with the solution...
\& }
\& else
\& {
\&     # do something with the fact that there is no solution...
\& }
.Ve
.PP
The three items returned are: the dimension "\f(CW$dimension\fR\*(L" of the solution
space (which is zero if only one solution exists, one if the solution is
a straight line, two if the solution is a plane, and so on), the solution
vector \*(R"\f(CW$x_vector\fR\*(L" (which is the vector \*(R"x" of your equation system
\&\f(CW\*(C`A * x = b\*(C'\fR) and a matrix "\f(CW$base_matrix\fR" representing a base of the
solution space (a set of vectors which span the solution space like
the spokes of an umbrella).
.PP
Only the first "\f(CW$dimension\fR" columns of this base matrix actually
contain entries; the remaining columns are all zero.
.PP
Now what is all this \*(L"base\*(R" stuff good for?
.PP
The output vector \*(L"x\*(R" is \fB\s-1ALWAYS\s0\fR a solution of your equation system
\&\f(CW\*(C`A * x = b\*(C'\fR.
.PP
But also any vector "\f(CW$vector\fR"
.PP
.Vb 1
\& $vector = $x_vector->clone();
.Ve
.PP
.Vb 1
\& $machine_infinity = 1E+99; # or something like that
.Ve
.PP
.Vb 4
\& for ( $i = 1; $i <= $dimension; $i++ )
\& {
\&     $vector += rand($machine_infinity) * $base_matrix->column($i);
\& }
.Ve
.PP
is a solution to your problem \f(CW\*(C`A * x = b\*(C'\fR, i.e., if "\f(CW$A_matrix\fR\*(L" contains
your matrix \*(R"A", then
.PP
.Vb 1
\& print abs( $A_matrix * $vector - $b_vector ), "\en";
.Ve
.PP
should print a number around 1E\-16 or so!
.PP
By the way, note that you can actually calculate those vectors "\f(CW$vector\fR"
a little more efficiently, as follows:
.PP
.Vb 1
\& $rand_vector = $x_vector->shadow();
.Ve
.PP
.Vb 1
\& $machine_infinity = 1E+99; # or something like that
.Ve
.PP
.Vb 4
\& for ( $i = 1; $i <= $dimension; $i++ )
\& {
\&     $rand_vector->assign($i,1, rand($machine_infinity) );
\& }
.Ve
.PP
.Vb 1
\& $vector = $x_vector + ( $base_matrix * $rand_vector );
.Ve
.PP
Note that the input matrix and vector are not changed by this method
in any way.
.RE
.IP "\(bu"
\&\f(CW\*(C`$inverse_matrix = $LR_matrix\->invert_LR();\*(C'\fR
.PP
Use this method to calculate the inverse of a given matrix "\f(CW$LR_matrix\fR\*(L",
which must be a (quadratic) matrix returned by the method \*(R"\fIdecompose_LR()\fR".
.PP
The method returns an object reference to a new matrix of the same size as
the input matrix containing the inverse of the matrix that you initially
fed into \*(L"\fIdecompose_LR()\fR\*(R" \fB\s-1IF\s0 \s-1THE\s0 \s-1INVERSE\s0 \s-1EXISTS\s0\fR, or an empty list
otherwise.
.PP
Therefore, you should always use this method in the following way:
.PP
.Vb 8
\& if ( $inverse_matrix = $LR->invert_LR() )
\& {
\&     # do something with the inverse matrix...
\& }
\& else
\& {
\&     # do something with the fact that there is no inverse matrix...
\& }
.Ve
.PP
Note that by definition (disregarding numerical errors), the product
of the initial matrix and its inverse (or vice versa) is always a matrix
containing ones on the main diagonal (elements (1,1), (2,2), (3,3) and
so on) and zeros elsewhere.
.PP
The input matrix is not changed by this method in any way.
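.PP
You can check this property numerically with a sketch like the following
(using the module's \*(L"\fIshadow()\fR\*(R" and \*(L"\fIone()\fR\*(R" methods to build an
identity matrix of matching size; the tolerance \*(L"1E\-12\*(R" is an arbitrary
choice):
.PP
.Vb 6
\& $identity = $A_matrix->shadow(); # new zero matrix of the same size
\& $identity->one();                # turn it into the identity matrix
\& if ( abs( $A_matrix * $inverse_matrix - $identity ) < 1E-12 )
\& {
\&     # the product is (numerically) the identity matrix...
\& }
.Ve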
.RE
.IP "\(bu"
\&\f(CW\*(C`$condition = $matrix\->condition($inverse_matrix);\*(C'\fR
.PP
In fact this method is just a shortcut for
.PP
.Vb 1
\& abs($matrix) * abs($inverse_matrix)
.Ve
.PP
Both input matrices must be quadratic and have the same size, and the result
is meaningful only if one of them is the inverse of the other (for instance,
as returned by the method \*(L"\fIinvert_LR()\fR\*(R").
.PP
The number returned is a measure of the \*(L"condition\*(R" of the given matrix
"\f(CW$matrix\fR", i.e., a measure of the numerical stability of the matrix.
.PP
This number is always positive, and the smaller its value, the better the
condition of the matrix (the better the stability of all subsequent
computations carried out using this matrix).
.PP
Numerical stability means, for example, that if
.PP
.Vb 1
\& abs( $vec_correct - $vec_with_error ) < $epsilon
.Ve
.PP
holds, there must be a "\f(CW$delta\fR\*(L" which doesn't depend on the vector
\&\*(R"\f(CW$vec_correct\fR\*(L" (nor \*(R"\f(CW$vec_with_error\fR", by the way) so that
.PP
.Vb 1
\& abs( $matrix * $vec_correct - $matrix * $vec_with_error ) < $delta
.Ve
.PP
also holds.
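.PP
Putting the pieces together, the condition of a matrix can be computed
like this (a sketch, assuming an invertible matrix in "\f(CW$A_matrix\fR"):
.PP
.Vb 5
\& $LR = $A_matrix->decompose_LR();
\& if ( $inverse = $LR->invert_LR() )
\& {
\&     $condition = $A_matrix->condition($inverse);
\& }
.Ve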
.RE
.IP "\(bu"
\&\f(CW\*(C`$determinant = $LR_matrix\->det_LR();\*(C'\fR
.PP
Calculates the determinant of a matrix from its \s-1LR\s0 decomposition matrix
"\f(CW$LR_matrix\fR\*(L" (which must be a (quadratic) matrix
returned by the method \*(R"\fIdecompose_LR()\fR").
.PP
In fact the determinant is a by-product of the \s-1LR\s0 decomposition: it is
(in principle, that is, except for the sign) simply the product of the
elements on the main diagonal (elements (1,1), (2,2), (3,3) and so on)
of the \s-1LR\s0 decomposition matrix.
.PP
(The sign is taken care of \*(L"magically\*(R" by this module.)
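.PP
For example (a sketch; note that the same \s-1LR\s0 decomposition can be reused
for \*(L"\fIsolve_LR()\fR\*(R", \*(L"\fIdet_LR()\fR\*(R" and \*(L"\fIorder_LR()\fR\*(R" alike):
.PP
.Vb 2
\& $LR = $A_matrix->decompose_LR();
\& $determinant = $LR->det_LR();
.Ve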
.RE
.IP "\(bu"
\&\f(CW\*(C`$order = $LR_matrix\->order_LR();\*(C'\fR
.PP
Calculates the order (called \*(L"Rang\*(R" in German) of a matrix from its
\&\s-1LR\s0 decomposition matrix "\f(CW$LR_matrix\fR\*(L" (which must
be a (quadratic) matrix returned by the method \*(R"\fIdecompose_LR()\fR").
.PP
This number is a measure of the number of linearly independent row
and column vectors (= the number of linearly independent equations in
the case of a matrix representing an equation system) of the
matrix that was initially fed into \*(L"\fIdecompose_LR()\fR\*(R".
.PP
If \*(L"n\*(R" is the number of rows and columns of the (quadratic!) matrix,
then \*(L"n \- order\*(R" is the dimension of the solution space of the
associated equation system.
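.PP
In other words (a sketch, with "\f(CW$n\fR" holding the number of rows and
columns of the quadratic matrix "\f(CW$A_matrix\fR"):
.PP
.Vb 3
\& $LR = $A_matrix->decompose_LR();
\& $order = $LR->order_LR();
\& $dim_of_solution_space = $n - $order;
.Ve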
.RE
.IP "\(bu"
\&\f(CW\*(C`$rank = $LR_matrix\->rank_LR();\*(C'\fR
.PP
This is an alias for the \f(CW\*(C`order_LR()\*(C'\fR function. The \*(L"order\*(R"
is usually called the \*(L"rank\*(R" in the United States.
.RE
.IP "\(bu"
\&\f(CW\*(C`$scalar_product = $vector1\->scalar_product($vector2);\*(C'\fR
.PP
Returns the scalar product of vector "\f(CW$vector1\fR\*(L" and vector \*(R"\f(CW$vector2\fR".
.PP
Both vectors must be column vectors (i.e., a matrix having
several rows but only one column).
.PP
This is a (more efficient!) shortcut for
.PP
.Vb 2
\& $temp           = ~$vector1 * $vector2;
\& $scalar_product = $temp->element(1,1);
.Ve
.PP
or, equivalently, the sum over \f(CW\*(C`i=1..n\*(C'\fR of the products \f(CW\*(C`vector1[i] * vector2[i]\*(C'\fR.
.PP
Provided that neither of the two input vectors is the null vector, the
two vectors are orthogonal, i.e., have an angle of 90 degrees
between them, exactly when their scalar product is zero, and
vice versa.
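.PP
An orthogonality test might therefore look like this (a sketch; the
tolerance \*(L"1E\-12\*(R" is an arbitrary choice):
.PP
.Vb 4
\& if ( abs( $vector1->scalar_product($vector2) ) < 1E-12 )
\& {
\&     # the two vectors are (numerically) orthogonal...
\& }
.Ve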
.RE
.IP "\(bu"
\&\f(CW\*(C`$vector_product = $vector1\->vector_product($vector2);\*(C'\fR
.PP
Returns the vector product of vector "\f(CW$vector1\fR\*(L" and vector \*(R"\f(CW$vector2\fR".
.PP
Both vectors must be column vectors (i.e., a matrix having several rows
but only one column).
.PP
Currently, the vector product is only defined for 3 dimensions (i.e.,
vectors with 3 rows); all other vectors trigger an error message.
.PP
In 3 dimensions, the vector product of two vectors \*(L"x\*(R" and \*(L"y\*(R"
is defined as
.PP
.Vb 3
\&             | x[1] y[1] e[1] |
\& determinant | x[2] y[2] e[2] |
\&             | x[3] y[3] e[3] |
.Ve
.PP
where the "\f(CW\*(C`x[i]\*(C'\fR\*(L" and \*(R"\f(CW\*(C`y[i]\*(C'\fR\*(L" are the components of the two vectors
\&\*(R"x\*(L" and \*(R"y\*(L", respectively, and the \*(R"\f(CW\*(C`e[i]\*(C'\fR\*(L" are unit vectors (i.e.,
vectors with a length equal to one) with a one in row \*(R"i" and zeros
elsewhere (this means that you have numbers and vectors as elements
in this matrix!).
.PP
This determinant evaluates to the rather simple formula
.PP
.Vb 3
\& z[1] = x[2] * y[3] - x[3] * y[2]
\& z[2] = x[3] * y[1] - x[1] * y[3]
\& z[3] = x[1] * y[2] - x[2] * y[1]
.Ve
.PP
A characteristic property of the vector product is that the resulting
vector is orthogonal to both of the input vectors (provided that neither
of them is the null vector, otherwise this is trivial), i.e., the scalar
product of each of the input vectors with the resulting vector is always zero.
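.PP
This property is easy to check numerically (a sketch; "\f(CW$x_vector\fR" and
"\f(CW$y_vector\fR" stand for any two 3\-dimensional column vectors):
.PP
.Vb 3
\& $z_vector = $x_vector->vector_product($y_vector);
\& print abs( $x_vector->scalar_product($z_vector) ), "\en"; # prints ~ 0
\& print abs( $y_vector->scalar_product($z_vector) ), "\en"; # prints ~ 0
.Ve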
.RE
.IP "\(bu"
\&\f(CW\*(C`$length = $vector\->length();\*(C'\fR
.PP
This is actually a shortcut for
.PP
.Vb 1
\& $length = sqrt( $vector->scalar_product($vector) );
.Ve
.PP
and returns the length of a given (column!) vector "\f(CW$vector\fR".
.PP
Note that the \*(L"length\*(R" calculated by this method is in fact the
\&\*(L"two\*(R"\-norm of the vector "\f(CW$vector\fR"!
.PP
The general definition for norms of vectors is the following:
.PP
.Vb 4
\& sub vector_norm
\& {
\&     croak "Usage: \e$norm = \e$vector->vector_norm(\e$n);"
\&       if (@_ != 2);
.Ve
.PP
.Vb 3
\&     my($vector,$n) = @_;
\&     my($rows,$cols) = ($vector->[1],$vector->[2]);
\&     my($k,$comp,$sum);
.Ve
.PP
.Vb 2
\&     croak "Math::MatrixReal::vector_norm(): vector is not a column vector"
\&       unless ($cols == 1);
.Ve
.PP
.Vb 2
\&     croak "Math::MatrixReal::vector_norm(): norm index must be > 0"
\&       unless ($n > 0);
.Ve
.PP
.Vb 2
\&     croak "Math::MatrixReal::vector_norm(): norm index must be integer"
\&       unless ($n == int($n));
.Ve
.PP
.Vb 8
\&     $sum = 0;
\&     for ( $k = 0; $k < $rows; $k++ )
\&     {
\&         $comp = abs( $vector->[0][$k][0] );
\&         $sum += $comp ** $n;
\&     }
\&     return( $sum ** (1 / $n) );
\& }
.Ve
.PP
Note that the case \*(L"n = 1\*(R" is the \*(L"one\*(R"\-norm for matrices applied to a
vector, the case \*(L"n = 2\*(R" is the Euclidean norm or length of a vector,
and if \*(L"n\*(R" goes to infinity, you have the \*(L"infinity\*(R"\- or \*(L"maximum\*(R"\-norm
for matrices applied to a vector!
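.PP
A typical application is normalizing a non-null vector to unit length
(a sketch; the scalar multiplication uses the overloaded '*' operator):
.PP
.Vb 5
\& $length = $vector->length();
\& if ($length != 0)
\& {
\&     $unit_vector = $vector * ( 1 / $length );
\& }
.Ve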
.RE
.IP "\(bu"
\&\f(CW\*(C`$xn_vector = $matrix\->\*(C'\fR\f(CW\*(C`solve_GSM($x0_vector,$b_vector,$epsilon);\*(C'\fR
.RE
.IP "\(bu"
\&\f(CW\*(C`$xn_vector = $matrix\->\*(C'\fR\f(CW\*(C`solve_SSM($x0_vector,$b_vector,$epsilon);\*(C'\fR
.RE
.IP "\(bu"
\&\f(CW\*(C`$xn_vector = $matrix\->\*(C'\fR\f(CW\*(C`solve_RM($x0_vector,$b_vector,$weight,$epsilon);\*(C'\fR
.PP
In some cases it might not be practical or desirable to solve an
equation system "\f(CW\*(C`A * x = b\*(C'\fR\*(L" using an analytical algorithm like
the \*(R"\fIdecompose_LR()\fR\*(L" and \*(R"\fIsolve_LR()\fR" method pair.
.PP
In fact in some cases, due to the numerical properties (the \*(L"condition\*(R")
of the matrix \*(L"A\*(R", the numerical error of the obtained result can be
greater than with an approximative (iterative) algorithm like one
of the three implemented here.
.PP
All three methods, \s-1GSM\s0 (\*(L"Global Step Method\*(R" or \*(L"Gesamtschrittverfahren\*(R"),
\&\s-1SSM\s0 (\*(L"Single Step Method\*(R" or \*(L"Einzelschrittverfahren\*(R") and \s-1RM\s0 (\*(L"Relaxation
Method\*(R" or \*(L"Relaxationsverfahren\*(R"), are fixed-point iterations, that is, they
can be described by an iteration function "\f(CW\*(C`x(t+1) = Phi( x(t) )\*(C'\fR" which has
the property:
.PP
.Vb 1
\& Phi(x) = x  <==>  A * x = b
.Ve
.PP
We can define "\f(CWPhi(x)\fR" as follows:
.PP
.Vb 1
\& Phi(x) := ( En - A ) * x + b
.Ve
.PP
where \*(L"En\*(R" is a matrix of the same size as \*(L"A\*(R" (\*(L"n\*(R" rows and columns)
with ones on its main diagonal and zeros elsewhere (i.e., the identity matrix).
.PP
This function has the required property.
.PP
Proof:
.PP
.Vb 1
\&       A * x                  = b
.Ve
.PP
.Vb 1
\& <==> -( A * x )              = -b
.Ve
.PP
.Vb 1
\& <==> -( A * x ) + x          = -b + x
.Ve
.PP
.Vb 1
\& <==> -( A * x ) + x + b      = x
.Ve
.PP
.Vb 1
\& <==>  x - ( A * x ) + b      = x
.Ve
.PP
.Vb 1
\& <==>  ( En - A ) * x + b     = x
.Ve
.PP
This last step is true because
.PP
.Vb 1
\& x[i] - ( a[i,1] x[1] + ... + a[i,i] x[i] + ... + a[i,n] x[n] ) + b[i]
.Ve
.PP
is the same as
.PP
.Vb 1
\& ( -a[i,1] x[1] + ... + (1 - a[i,i]) x[i] + ... + -a[i,n] x[n] ) + b[i]
.Ve
.PP
q.e.d.
.PP
Note that actually solving the equation system "\f(CW\*(C`A * x = b\*(C'\fR" means
calculating
.PP
.Vb 1
\&       a[i,1] x[1] + ... + a[i,i] x[i] + ... + a[i,n] x[n] = b[i]
.Ve
.PP
.Vb 4
\& <==>  a[i,i] x[i] =
\&       b[i]
\&       - ( a[i,1] x[1] + ... + a[i,i] x[i] + ... + a[i,n] x[n] )
\&       + a[i,i] x[i]
.Ve
.PP
.Vb 5
\& <==>  x[i] =
\&       ( b[i]
\&         - ( a[i,1] x[1] + ... + a[i,i] x[i] + ... + a[i,n] x[n] )
\&         + a[i,i] x[i]
\&       ) / a[i,i]
.Ve
.PP
.Vb 5
\& <==>  x[i] =
\&       ( b[i] -
\&         ( a[i,1] x[1] + ... + a[i,i-1] x[i-1] +
\&           a[i,i+1] x[i+1] + ... + a[i,n] x[n] )
\&       ) / a[i,i]
.Ve
.PP
There is one major restriction, though: a fixed-point iteration is
guaranteed to converge only if the first derivative of the iteration
function has an absolute value less than one in an area around the
point "\f(CWx(*)\fR\*(L" for which \*(R"\f(CW\*(C`Phi( x(*) ) = x(*)\*(C'\fR\*(L" is to be true, and
if the start vector \*(R"\f(CWx(0)\fR" lies within that area!
.PP
This is best verified graphically, which unfortunately is impossible
to do in this textual documentation!
.PP
See the literature on Numerical Analysis for details!
.PP
In our case, this restriction translates to the following three conditions:
there must exist a norm so that the norm of the matrix of the iteration
function, \f(CW\*(C`( En \- A )\*(C'\fR, has a value less than one; the matrix \*(L"A\*(R" must
not have any zero value on its main diagonal; and the initial vector
"\f(CWx(0)\fR\*(L" must be \*(R"good enough\*(L", i.e., \*(R"close enough\*(L" to the solution
\&\*(R"\f(CWx(*)\fR".
.PP
(Remember school math: the first derivative of a straight line given by
"\f(CW\*(C`y = a * x + b\*(C'\fR\*(L" is \*(R"a"!)
.PP
The three methods expect a (quadratic!) matrix "\f(CW$matrix\fR\*(L" as their
first argument, a start vector \*(R"\f(CW$x0_vector\fR\*(L", a vector \*(R"\f(CW$b_vector\fR\*(L"
(which is the vector \*(R"b\*(L" in your equation system \*(R"\f(CW\*(C`A * x = b\*(C'\fR\*(L"), in the
case of the \*(R"Relaxation Method\*(L" (\*(R"\s-1RM\s0\*(L"), a real number \*(R"\f(CW$weight\fR\*(L", best
chosen between zero and two, and finally an error limit (a real number) \*(R"\f(CW$epsilon\fR".
.PP
(Note that the weight "\f(CW$weight\fR\*(L" used by the \*(R"Relaxation Method\*(L" (\*(R"\s-1RM\s0")
is \fB\s-1NOT\s0\fR checked to lie within any reasonable range!)
.PP
The three methods first test the first two of the three conditions
listed above and return an empty list if these conditions
are not fulfilled.
.PP
Therefore, you should always test their return value using some
code like:
.PP
.Vb 8
\& if ( $xn_vector = $A_matrix->solve_GSM($x0_vector,$b_vector,1E-12) )
\& {
\&     # do something with the solution...
\& }
\& else
\& {
\&     # do something with the fact that there is no solution...
\& }
.Ve
.PP
If the conditions are fulfilled, the methods iterate until
\&\f(CW\*(C`abs( Phi(x) \- x ) < epsilon\*(C'\fR.
.PP
(Beware that theoretically, infinite loops might result if the starting
vector is too far \*(L"off\*(R" the solution! In practice, this shouldn't be
a problem. Anyway, you can always press <ctrl\-C> if you think that the
iteration takes too long!)
.PP
The difference between the three methods is the following:
.PP
In the \*(L"Global Step Method\*(R" (\*(L"\s-1GSM\s0\*(R"), the new vector "\f(CW\*(C`x(t+1)\*(C'\fR\*(L"
(called \*(R"y\*(L" here) is calculated from the vector \*(R"\f(CWx(t)\fR\*(L"
(called \*(R"x" here) according to the formula:
.PP
.Vb 5
\& y[i] =
\& ( b[i]
\&   - ( a[i,1] x[1] + ... + a[i,i-1] x[i-1] +
\&       a[i,i+1] x[i+1] + ... + a[i,n] x[n] )
\& ) / a[i,i]
.Ve
.PP
In the \*(L"Single Step Method\*(R" (\*(L"\s-1SSM\s0\*(R"), the components of the vector
"\f(CW\*(C`x(t+1)\*(C'\fR" which have already been calculated are used to calculate
the remaining components, i.e.
.PP
.Vb 5
\& y[i] =
\& ( b[i]
\&   - ( a[i,1] y[1] + ... + a[i,i-1] y[i-1] +   # note the "y[]"!
\&       a[i,i+1] x[i+1] + ... + a[i,n] x[n] )   # note the "x[]"!
\& ) / a[i,i]
.Ve
.PP
In the \*(L"Relaxation Method\*(R" (\*(L"\s-1RM\s0\*(R"), the components of the vector
"\f(CW\*(C`x(t+1)\*(C'\fR\*(L" are calculated by \*(R"mixing\*(L" old and new values (like
cold and hot water), and the weight \*(R"\f(CW$weight\fR\*(L" determines the
\&\*(R"aperture\*(L" of both the \*(R"hot water tap\*(L" and the \*(R"cold
water tap", according to the formula:
.PP
.Vb 6
\& y[i] =
\& ( b[i]
\&   - ( a[i,1] y[1] + ... + a[i,i-1] y[i-1] +   # note the "y[]"!
\&       a[i,i+1] x[i+1] + ... + a[i,n] x[n] )   # note the "x[]"!
\& ) / a[i,i]
\& y[i] = weight * y[i] + (1 - weight) * x[i]
.Ve
.PP
Note that the weight "\f(CW$weight\fR" should be greater than zero and
less than two (!).
.PP
The three methods are supposed to be of different efficiency.
Experiment!
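.PP
For illustration, here is a hand-written sketch of one \s-1GSM\s0 step
operating on plain (1\-based) Perl arrays "\f(CW@a\fR", "\f(CW@b\fR", "\f(CW@x\fR"
and "\f(CW@y\fR" (purely illustrative \- \*(L"\fIsolve_GSM()\fR\*(R" does all of this
for you, including the convergence checks):
.PP
.Vb 10
\& # one GSM step: compute y = Phi(x) componentwise
\& for ( $i = 1; $i <= $n; $i++ )
\& {
\&     $sum = 0;
\&     for ( $j = 1; $j <= $n; $j++ )
\&     {
\&         $sum += $a[$i][$j] * $x[$j] unless ($j == $i);
\&     }
\&     $y[$i] = ( $b[$i] - $sum ) / $a[$i][$i];
\& }
.Ve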
.PP
Remember that in most cases, it is probably advantageous to
\&\*(L"\fInormalize()\fR\*(R" your equation system prior to solving it!
.SH "OVERLOADED OPERATORS"
.IX Header "OVERLOADED OPERATORS"
.Sh "\s-1SYNOPSIS\s0"
.IX Subsection "SYNOPSIS"
.IP "\(bu" 2
Unary operators:
.Sp
"\f(CW\*(C`\-\*(C'\fR\*(L", \*(R"\f(CW\*(C`~\*(C'\fR\*(L", \*(R"\f(CW\*(C`abs\*(C'\fR", \f(CW\*(C`test\*(C'\fR, "\f(CW\*(C`!\*(C'\fR", '\f(CW""\fR'
.IP "\(bu" 2
Binary (arithmetic) operators:
.Sp
"\f(CW\*(C`+\*(C'\fR\*(L", \*(R"\f(CW\*(C`\-\*(C'\fR\*(L", \*(R"\f(CW\*(C`*\*(C'\fR\*(L", \*(R"\f(CW\*(C`**\*(C'\fR\*(L",
\&\*(R"\f(CW\*(C`+=\*(C'\fR\*(L", \*(R"\f(CW\*(C`\-=\*(C'\fR\*(L", \*(R"\f(CW\*(C`*=\*(C'\fR\*(L", \*(R"\f(CW\*(C`**=\*(C'\fR"
.IP "\(bu" 2
Binary (relational) operators:
.Sp
"\f(CW\*(C`==\*(C'\fR\*(L", \*(R"\f(CW\*(C`!=\*(C'\fR\*(L", \*(R"\f(CW\*(C`<\*(C'\fR\*(L", \*(R"\f(CW\*(C`<=\*(C'\fR\*(L", \*(R"\f(CW\*(C`>\*(C'\fR\*(L", \*(R"\f(CW\*(C`>=\*(C'\fR"
.Sp
"\f(CW\*(C`eq\*(C'\fR\*(L", \*(R"\f(CW\*(C`ne\*(C'\fR\*(L", \*(R"\f(CW\*(C`lt\*(C'\fR\*(L", \*(R"\f(CW\*(C`le\*(C'\fR\*(L", \*(R"\f(CW\*(C`gt\*(C'\fR\*(L", \*(R"\f(CW\*(C`ge\*(C'\fR"
.Sp
Note that the latter ("\f(CW\*(C`eq\*(C'\fR\*(L", \*(R"\f(CW\*(C`ne\*(C'\fR\*(L", ... ) are just synonyms
of the former (\*(R"\f(CW\*(C`==\*(C'\fR\*(L", \*(R"\f(CW\*(C`!=\*(C'\fR", ... ), defined for convenience
only.
.Sh "\s-1DESCRIPTION\s0"
.IX Subsection "DESCRIPTION"
.IP "'\-'" 5
Unary minus
.Sp
Returns the negative of the given matrix, i.e., the matrix with
all elements multiplied by the factor \*(L"\-1\*(R".
.Sp
Example:
.Sp
.Vb 1
\& $matrix = -$matrix;
.Ve
.IP "'~'" 5
Transposition
.Sp
Returns the transpose of the given matrix.
.Sp
Examples:
.Sp
.Vb 2
\& $temp = ~$vector * $vector;
\& $length = sqrt( $temp->element(1,1) );
.Ve
.Sp
.Vb 1
\& if (~$matrix == $matrix) { # matrix is symmetric ... }
.Ve
.IP "abs" 5
.IX Item "abs"
Norm
.Sp
Returns the \*(L"one\*(R"\-norm of the given matrix.
.Sp
Example:
.Sp
.Vb 1
\& $error = abs( $A * $x - $b );
.Ve
.IP "test" 5
.IX Item "test"
Boolean test
.Sp
Tests whether there is at least one non-zero element in the matrix.
.Sp
Example:
.Sp
.Vb 1
\& if ($xn_vector) { # result of iteration is not zero ... }
.Ve
.IP "'!'" 5
Negated boolean test
.Sp
Tests whether the matrix contains only zeros.
.Sp
Examples:
.Sp
.Vb 2
\& if (! $b_vector) { # homogeneous equation system ... }
\& else             { # inhomogeneous equation system ... }
.Ve
.Sp
.Vb 1
\& unless ($x_vector) { # not the null-vector! }
.Ve
.ie n .IP "'""""""""'" 5
.el .IP "'``''``'''" 5
\&\*(L"Stringify\*(R" operator
.Sp
Converts the given matrix into a string.
.Sp
Uses scientific representation to keep precision loss to a minimum in case
you want to read this string back in again later with \*(L"\fInew_from_string()\fR\*(R".
.Sp
Uses a 13\-digit mantissa and a 20\-character field for each element so that
lines will wrap nicely on an 80\-column screen.
.Sp
Examples:
.Sp
.Vb 5
\& $matrix = Math::MatrixReal->new_from_string(<<"MATRIX");
\& [ 1 0 ]
\& [ 0 -1 ]
\& MATRIX
\& print "$matrix";
.Ve
.Sp
.Vb 2
\& [ 1.000000000000E+00 0.000000000000E+00 ]
\& [ 0.000000000000E+00 -1.000000000000E+00 ]
.Ve
.Sp
.Vb 3
\& $string = "$matrix";
\& $test = Math::MatrixReal->new_from_string($string);
\& if ($test == $matrix) { print ":-)\en"; } else { print ":-(\en"; }
.Ve
.IP "'+'" 5
Addition
.Sp
Returns the sum of the two given matrices.
.Sp
Examples:
.Sp
.Vb 1
\& $matrix_S = $matrix_A + $matrix_B;
.Ve
.Sp
.Vb 1
\& $matrix_A += $matrix_B;
.Ve
.IP "'\-'" 5
Subtraction
.Sp
Returns the difference of the two given matrices.
.Sp
Examples:
.Sp
.Vb 1
\& $matrix_D = $matrix_A - $matrix_B;
.Ve
.Sp
.Vb 1
\& $matrix_A -= $matrix_B;
.Ve
.Sp
Note that this is the same as:
.Sp
.Vb 1
\& $matrix_D = $matrix_A + -$matrix_B;
.Ve
.Sp
.Vb 1
\& $matrix_A += -$matrix_B;
.Ve
.Sp
(The latter are less efficient, though.)
.IP "'*'" 5
Multiplication
.Sp
Returns the matrix product of the two given matrices or
the product of the given matrix and a scalar factor.
.Sp
Examples:
.Sp
.Vb 1
\& $matrix_P = $matrix_A * $matrix_B;
.Ve
.Sp
.Vb 1
\& $matrix_A *= $matrix_B;
.Ve
.Sp
.Vb 1
\& $vector_b = $matrix_A * $vector_x;
.Ve
.Sp
.Vb 1
\& $matrix_B = -1 * $matrix_A;
.Ve
.Sp
.Vb 1
\& $matrix_B = $matrix_A * -1;
.Ve
.Sp
.Vb 1
\& $matrix_A *= -1;
.Ve
.IP "'**'" 5
Exponentiation
.Sp
Returns the matrix raised to an integer power. If 0 is passed,
the identity matrix is returned. If a negative integer is passed,
it computes the inverse (if it exists) and then raises the inverse
to the absolute value of the integer. The matrix must be quadratic.
.Sp
Examples:
.Sp
.Vb 1
\& $matrix2 = $matrix ** 2;
.Ve
.Sp
.Vb 1
\& $matrix **= 2;
.Ve
.Sp
.Vb 1
\& $inv2 = $matrix ** -2;
.Ve
.Sp
.Vb 1
\& $ident = $matrix ** 0;
.Ve
.IP "'=='" 5
Equality
.Sp
Tests two matrices for equality.
.Sp
Example:
.Sp
.Vb 1
\& if ( $A * $x == $b ) { print "EUREKA!\en"; }
.Ve
.Sp
Note that in most cases, due to numerical errors (due to the finite
precision of computer arithmetic), it is a bad idea to compare two
matrices or vectors this way.
.Sp
It is better to take the norm of the difference of the two matrices you
want to compare and to compare that norm with a small number, like this:
.Sp
.Vb 1
\& if ( abs( $A * $x - $b ) < 1E-12 ) { print "BINGO!\en"; }
.Ve
.IP "'!='" 5
Inequality
.Sp
Tests two matrices for inequality.
.Sp
Example:
.Sp
.Vb 1
\& while ($x0_vector != $xn_vector) { # proceed with iteration ... }
.Ve
.Sp
(This stops when the iteration becomes stationary.)
.Sp
Note that (just like with the '==' operator), it is usually a bad idea
to compare matrices or vectors this way. Compare the norm of the difference
of the two matrices with a small number instead.
.IP "'<'" 5
Less than
.Sp
Examples:
.Sp
.Vb 1
\& if ( $matrix1 < $matrix2 ) { # ... }
.Ve
.Sp
.Vb 1
\& if ( $vector < $epsilon ) { # ... }
.Ve
.Sp
.Vb 1
\& if ( 1E-12 < $vector ) { # ... }
.Ve
.Sp
.Vb 1
\& if ( $A * $x - $b < 1E-12 ) { # ... }
.Ve
.Sp
These are just shortcuts for saying:
.Sp
.Vb 1
\& if ( abs($matrix1) < abs($matrix2) ) { # ... }
.Ve
.Sp
.Vb 1
\& if ( abs($vector) < abs($epsilon) ) { # ... }
.Ve
.Sp
.Vb 1
\& if ( abs(1E-12) < abs($vector) ) { # ... }
.Ve
.Sp
.Vb 1
\& if ( abs( $A * $x - $b ) < abs(1E-12) ) { # ... }
.Ve
.Sp
Uses the \*(L"one\*(R"\-norm for matrices and Perl's built-in \*(L"\fIabs()\fR\*(R" for scalars.
.IP "'<='" 5
Less than or equal
.Sp
As with the '<' operator, this is just a shortcut for the same expression
with \*(L"\fIabs()\fR\*(R" around all arguments.
.Sp
Example:
.Sp
.Vb 1
\& if ( $A * $x - $b <= 1E-12 ) { # ... }
.Ve
.Sp
which in fact is the same as:
.Sp
.Vb 1
\& if ( abs( $A * $x - $b ) <= abs(1E-12) ) { # ... }
.Ve
.Sp
Uses the \*(L"one\*(R"\-norm for matrices and Perl's built-in \*(L"\fIabs()\fR\*(R" for scalars.
.IP "'>'" 5
Greater than
.Sp
As with the '<' and '<=' operators, this
.Sp
.Vb 1
\& if ( $xn - $x0 > 1E-12 ) { # ... }
.Ve
.Sp
is just a shortcut for:
.Sp
.Vb 1
\& if ( abs( $xn - $x0 ) > abs(1E-12) ) { # ... }
.Ve
.Sp
Uses the \*(L"one\*(R"\-norm for matrices and Perl's built-in \*(L"\fIabs()\fR\*(R" for scalars.
.IP "'>='" 5
Greater than or equal
.Sp
As with the '<', '<=' and '>' operators, the following
.Sp
.Vb 1
\& if ( $LR >= $A ) { # ... }
.Ve
.Sp
is simply a shortcut for:
.Sp
.Vb 1
\& if ( abs($LR) >= abs($A) ) { # ... }
.Ve
.Sp
Uses the \*(L"one\*(R"\-norm for matrices and Perl's built-in \*(L"\fIabs()\fR\*(R" for scalars.
.SH "SEE ALSO"
.IX Header "SEE ALSO"
Math::VectorReal, Math::PARI, Math::MatrixBool,
DFA::Kleene, Math::Kleene,
Set::IntegerRange, Set::IntegerFast.
.SH "VERSION"
.IX Header "VERSION"
This man page documents Math::MatrixReal version 1.9.
.PP
The latest version can always be found at
http://leto.net/code/Math\-MatrixReal/
.SH "AUTHORS"
.IX Header "AUTHORS"
Steffen Beyer <sb@engelschall.com>, Rodolphe Ortalo <ortalo@laas.fr>,
Jonathan Leto <jonathan@leto.net>.
.PP
Currently maintained by Jonathan Leto; send all bugs/patches to me.
.SH "CREDITS"
.IX Header "CREDITS"
Many thanks to Prof. Pahlings for stoking the fire of my enthusiasm for
Algebra and Linear Algebra at the university (\s-1RWTH\s0 Aachen, Germany), and
to Prof. Esser and his assistant, Mr. Jarausch, for their fascinating
lectures in Numerical Analysis!
.SH "COPYRIGHT"
.IX Header "COPYRIGHT"
Copyright (c) 1996\-2002 by Steffen Beyer, Rodolphe Ortalo, Jonathan Leto.
All rights reserved.
.SH "LICENSE AGREEMENT"
.IX Header "LICENSE AGREEMENT"
This package is free software; you can redistribute it and/or
modify it under the same terms as Perl itself.