<?xml version="1.0" encoding="UTF-8"?>
<!-- MARCXML bibliographic record (MARC 21 "slim" schema) for a Springer journal
     article delivered under a national licence. Field and subfield ORDER is
     significant to MARC consumers; this revision adds comments only. -->
<collection xmlns="http://www.loc.gov/MARC21/slim">
 <record>
  <!-- Leader: chars 5-7 "caa" = corrected record / language material /
       monographic component part; record length left blank (filled on export). -->
  <leader>     caa a22        4500</leader>
  <!-- 001/003: record control number and the agency that assigned it
       (NOTE(review): "CHVBK" is presumably the Swiss library network; confirm). -->
  <controlfield tag="001">467914931</controlfield>
  <controlfield tag="003">CHVBK</controlfield>
  <!-- 005: date/time of latest transaction (2018-04-06 15:28:59). -->
  <controlfield tag="005">20180406152859.0</controlfield>
  <!-- 007: physical description fixed field; "cr" = electronic resource, remote. -->
  <controlfield tag="007">cr unu---uuuuu</controlfield>
  <!-- 008: fixed-length data elements; entered 2017-03-28, type "e" (detailed
       publication date 2006-02-01), language "eng". -->
  <controlfield tag="008">170328e20060201xx      s     000 0 eng  </controlfield>
  <!-- 024 ind1=7: other standard identifier; $2 names the scheme (DOI). -->
  <datafield tag="024" ind1="7" ind2="0">
   <subfield code="a">10.1007/s11263-005-3954-9</subfield>
   <subfield code="2">doi</subfield>
  </datafield>
  <!-- 035: system control number in the NATIONALLICENCE namespace. -->
  <datafield tag="035" ind1=" " ind2=" ">
   <subfield code="a">(NATIONALLICENCE)springer-10.1007/s11263-005-3954-9</subfield>
  </datafield>
  <!-- 100 1_: main entry, personal name; $4 "aut" = author relator code.
       NOTE(review): uppercase $D (forename) and the surname-only $a are a local
       convention, not standard MARC (which puts the full name in $a); confirm
       against the consuming system before normalizing. -->
  <datafield tag="100" ind1="1" ind2=" ">
   <subfield code="a">Sato</subfield>
   <subfield code="D">Jun</subfield>
   <subfield code="u">Department of Computer Science and Engineering, Nagoya Institute of Technology/JST, 466-8555, Nagoya, Japan</subfield>
   <subfield code="4">aut</subfield>
  </datafield>
  <!-- 245 10: title statement; $h medium, $c statement of responsibility. -->
  <datafield tag="245" ind1="1" ind2="0">
   <subfield code="a">Recovering Multiple View Geometry from Mutual Projections of Multiple Cameras</subfield>
   <subfield code="h">[Elektronische Daten]</subfield>
   <subfield code="c">[Jun Sato]</subfield>
  </datafield>
  <!-- 520 3_: abstract (verbatim publisher text; do not edit). -->
  <datafield tag="520" ind1="3" ind2=" ">
   <subfield code="a">In this paper, we analyze the computation of epipolar geometry in some special cases where multiple cameras are projected each other in their images. In such cases, epipoles can be obtained directly from images as the projection of cameras. As the result, the epipolar geometry can be computed from less image correspondences with higher stability. In this paper, we analyze the number of corresponding points required for computing bifocal, trifocal and quadrifocal tensors linearly in the case where cameras are projected mutually. We next show a practical linear method for computing multifocal tensors by using the mutual projection of cameras. The degenerate configurations of points and cameras is also studied, and it is shown that some degenerate configurations in general cases are no longer degenerate under the mutual projection of cameras.</subfield>
  </datafield>
  <!-- 540: terms governing use (copyright statement). -->
  <datafield tag="540" ind1=" " ind2=" ">
   <subfield code="a">Springer Science + Business Media, Inc., 2006</subfield>
  </datafield>
  <!-- 690 (repeatable, local-use tag): uncontrolled subject keywords;
       $2 names the source vocabulary. -->
  <datafield tag="690" ind1=" " ind2="7">
   <subfield code="a">multifocal tensors</subfield>
   <subfield code="2">nationallicence</subfield>
  </datafield>
  <datafield tag="690" ind1=" " ind2="7">
   <subfield code="a">mutual projections</subfield>
   <subfield code="2">nationallicence</subfield>
  </datafield>
  <datafield tag="690" ind1=" " ind2="7">
   <subfield code="a">epipolar geometry</subfield>
   <subfield code="2">nationallicence</subfield>
  </datafield>
  <datafield tag="690" ind1=" " ind2="7">
   <subfield code="a">multiple cameras</subfield>
   <subfield code="2">nationallicence</subfield>
  </datafield>
  <datafield tag="690" ind1=" " ind2="7">
   <subfield code="a">camera calibration</subfield>
   <subfield code="2">nationallicence</subfield>
  </datafield>
  <!-- 773 0_: host item entry (the journal citation): $t title, $d publisher,
       $g volume/issue(date), pages; $x ISSN; $q enumeration&lt;first page.
       NOTE(review): $1 (year), $2 (volume) and $o here look locally defined;
       verify their semantics against the consuming system. -->
  <datafield tag="773" ind1="0" ind2=" ">
   <subfield code="t">International Journal of Computer Vision</subfield>
   <subfield code="d">Kluwer Academic Publishers</subfield>
   <subfield code="g">66/2(2006-02-01), 123-140</subfield>
   <subfield code="x">0920-5691</subfield>
   <subfield code="q">66:2&lt;123</subfield>
   <subfield code="1">2006</subfield>
   <subfield code="2">66</subfield>
   <subfield code="o">11263</subfield>
  </datafield>
  <!-- 856 40: electronic location, access via DOI resolver; $q media type,
       $z public note (German: "online access via DOI"). -->
  <datafield tag="856" ind1="4" ind2="0">
   <subfield code="u">https://doi.org/10.1007/s11263-005-3954-9</subfield>
   <subfield code="q">text/html</subfield>
   <subfield code="z">Onlinezugriff via DOI</subfield>
  </datafield>
  <!-- 9XX fields below are in the range reserved for local use. Uppercase
       subfield codes ($B, $D, $E, $F, $P) are local extensions. -->
  <!-- 908: local document-type note ($2 "jats" = source vocabulary). -->
  <datafield tag="908" ind1=" " ind2=" ">
   <subfield code="D">1</subfield>
   <subfield code="a">research-article</subfield>
   <subfield code="2">jats</subfield>
  </datafield>
  <!-- 950 (repeated): local mirrors of fields 856, 100 and 773; $P holds the
       source tag and $E its indicators, presumably for network-level
       processing. Keep these byte-identical to their source fields. -->
  <datafield tag="950" ind1=" " ind2=" ">
   <subfield code="B">NATIONALLICENCE</subfield>
   <subfield code="P">856</subfield>
   <subfield code="E">40</subfield>
   <subfield code="u">https://doi.org/10.1007/s11263-005-3954-9</subfield>
   <subfield code="q">text/html</subfield>
   <subfield code="z">Onlinezugriff via DOI</subfield>
  </datafield>
  <datafield tag="950" ind1=" " ind2=" ">
   <subfield code="B">NATIONALLICENCE</subfield>
   <subfield code="P">100</subfield>
   <subfield code="E">1-</subfield>
   <subfield code="a">Sato</subfield>
   <subfield code="D">Jun</subfield>
   <subfield code="u">Department of Computer Science and Engineering, Nagoya Institute of Technology/JST, 466-8555, Nagoya, Japan</subfield>
   <subfield code="4">aut</subfield>
  </datafield>
  <datafield tag="950" ind1=" " ind2=" ">
   <subfield code="B">NATIONALLICENCE</subfield>
   <subfield code="P">773</subfield>
   <subfield code="E">0-</subfield>
   <subfield code="t">International Journal of Computer Vision</subfield>
   <subfield code="d">Kluwer Academic Publishers</subfield>
   <subfield code="g">66/2(2006-02-01), 123-140</subfield>
   <subfield code="x">0920-5691</subfield>
   <subfield code="q">66:2&lt;123</subfield>
   <subfield code="1">2006</subfield>
   <subfield code="2">66</subfield>
   <subfield code="o">11263</subfield>
  </datafield>
  <!-- 900: local metadata-rights statement. -->
  <datafield tag="900" ind1=" " ind2="7">
   <subfield code="a">Metadata rights reserved</subfield>
   <subfield code="b">Springer special CC-BY-NC licence</subfield>
   <subfield code="2">nationallicence</subfield>
  </datafield>
  <!-- 898: local classification/collection codes (semantics not visible here). -->
  <datafield tag="898" ind1=" " ind2=" ">
   <subfield code="a">BK010053</subfield>
   <subfield code="b">XK010053</subfield>
   <subfield code="c">XK010000</subfield>
  </datafield>
  <!-- 949: local holdings/licence routing record. -->
  <datafield tag="949" ind1=" " ind2=" ">
   <subfield code="B">NATIONALLICENCE</subfield>
   <subfield code="F">NATIONALLICENCE</subfield>
   <subfield code="b">NL-springer</subfield>
  </datafield>
 </record>
</collection>
