<?xml version="1.0" encoding="UTF-8"?>
<collection xmlns="http://www.loc.gov/MARC21/slim">
 <record>
  <leader>     caa a22        4500</leader>
  <controlfield tag="001">467914907</controlfield>
  <controlfield tag="003">CHVBK</controlfield>
  <controlfield tag="005">20180406152859.0</controlfield>
  <controlfield tag="007">cr unu---uuuuu</controlfield>
  <controlfield tag="008">170328e20060201xx      s     000 0 eng  </controlfield>
  <datafield tag="024" ind1="7" ind2="0">
   <subfield code="a">10.1007/s11263-005-3953-x</subfield>
   <subfield code="2">doi</subfield>
  </datafield>
  <datafield tag="035" ind1=" " ind2=" ">
   <subfield code="a">(NATIONALLICENCE)springer-10.1007/s11263-005-3953-x</subfield>
  </datafield>
  <datafield tag="245" ind1="0" ind2="2">
   <subfield code="a">A Surface Reconstruction Method Using Global Graph Cut Optimization</subfield>
   <subfield code="h">[Elektronische Daten]</subfield>
   <subfield code="c">[Sylvain Paris, François Sillion, Long Quan]</subfield>
  </datafield>
  <datafield tag="520" ind1="3" ind2=" ">
   <subfield code="a">Surface reconstruction from multiple calibrated images has been mainly approached using local methods, either as a continuous optimization problem driven by level sets, or by discrete volumetric methods such as space carving. We propose a direct surface reconstruction approach which starts from a continuous geometric functional that is minimized up to a discretization by a global graph-cut algorithm operating on a 3D embedded graph. The method is related to the stereo disparity computation based on graph-cut formulation, but fundamentally different in two aspects. First, existing stereo disparity methods are only interested in obtaining layers of constant disparity, while we focus on high resolution surface geometry. Second, most of the existing graph-cut algorithms only reach approximate solutions, while we guarantee a global minimum. The whole procedure is consistently incorporated into a voxel representation that handles both occlusions and discontinuities. We demonstrate our algorithm on real sequences, yielding remarkably detailed surface geometry up to 1/10th of a pixel.</subfield>
  </datafield>
  <datafield tag="540" ind1=" " ind2=" ">
   <subfield code="a">Springer Science + Business Media, Inc., 2006</subfield>
  </datafield>
  <datafield tag="690" ind1=" " ind2="7">
   <subfield code="a">graph flow</subfield>
   <subfield code="2">nationallicence</subfield>
  </datafield>
  <datafield tag="690" ind1=" " ind2="7">
   <subfield code="a">graph cut</subfield>
   <subfield code="2">nationallicence</subfield>
  </datafield>
  <datafield tag="690" ind1=" " ind2="7">
   <subfield code="a">3D reconstruction from calibrated cameras</subfield>
   <subfield code="2">nationallicence</subfield>
  </datafield>
  <datafield tag="690" ind1=" " ind2="7">
   <subfield code="a">discontinuities</subfield>
   <subfield code="2">nationallicence</subfield>
  </datafield>
  <datafield tag="690" ind1=" " ind2="7">
   <subfield code="a">self-occlusions</subfield>
   <subfield code="2">nationallicence</subfield>
  </datafield>
  <datafield tag="690" ind1=" " ind2="7">
   <subfield code="a">occlusions</subfield>
   <subfield code="2">nationallicence</subfield>
  </datafield>
  <datafield tag="690" ind1=" " ind2="7">
   <subfield code="a">global minimum</subfield>
   <subfield code="2">nationallicence</subfield>
  </datafield>
  <datafield tag="700" ind1="1" ind2=" ">
   <subfield code="a">Paris</subfield>
   <subfield code="D">Sylvain</subfield>
   <subfield code="u">MIT CSAIL, 02139, Cambridge, MA, USA</subfield>
   <subfield code="4">aut</subfield>
  </datafield>
  <datafield tag="700" ind1="1" ind2=" ">
   <subfield code="a">Sillion</subfield>
   <subfield code="D">François</subfield>
   <subfield code="u">ARTIS, GRAVIR/IMAG-INRIA, 38334, Saint Ismier, France</subfield>
   <subfield code="4">aut</subfield>
  </datafield>
  <datafield tag="700" ind1="1" ind2=" ">
   <subfield code="a">Quan</subfield>
   <subfield code="D">Long</subfield>
   <subfield code="u">Department of Computer Science, Hong Kong University of Science and Technology, Hong Kong, SAR, China</subfield>
   <subfield code="4">aut</subfield>
  </datafield>
  <datafield tag="773" ind1="0" ind2=" ">
   <subfield code="t">International Journal of Computer Vision</subfield>
   <subfield code="d">Kluwer Academic Publishers</subfield>
   <subfield code="g">66/2(2006-02-01), 141-161</subfield>
   <subfield code="x">0920-5691</subfield>
   <subfield code="q">66:2&lt;141</subfield>
   <subfield code="1">2006</subfield>
   <subfield code="2">66</subfield>
   <subfield code="o">11263</subfield>
  </datafield>
  <datafield tag="856" ind1="4" ind2="0">
   <subfield code="u">https://doi.org/10.1007/s11263-005-3953-x</subfield>
   <subfield code="q">text/html</subfield>
   <subfield code="z">Onlinezugriff via DOI</subfield>
  </datafield>
  <datafield tag="908" ind1=" " ind2=" ">
   <subfield code="D">1</subfield>
   <subfield code="a">research-article</subfield>
   <subfield code="2">jats</subfield>
  </datafield>
  <datafield tag="950" ind1=" " ind2=" ">
   <subfield code="B">NATIONALLICENCE</subfield>
   <subfield code="P">856</subfield>
   <subfield code="E">40</subfield>
   <subfield code="u">https://doi.org/10.1007/s11263-005-3953-x</subfield>
   <subfield code="q">text/html</subfield>
   <subfield code="z">Onlinezugriff via DOI</subfield>
  </datafield>
  <datafield tag="950" ind1=" " ind2=" ">
   <subfield code="B">NATIONALLICENCE</subfield>
   <subfield code="P">700</subfield>
   <subfield code="E">1-</subfield>
   <subfield code="a">Paris</subfield>
   <subfield code="D">Sylvain</subfield>
   <subfield code="u">MIT CSAIL, 02139, Cambridge, MA, USA</subfield>
   <subfield code="4">aut</subfield>
  </datafield>
  <datafield tag="950" ind1=" " ind2=" ">
   <subfield code="B">NATIONALLICENCE</subfield>
   <subfield code="P">700</subfield>
   <subfield code="E">1-</subfield>
   <subfield code="a">Sillion</subfield>
   <subfield code="D">François</subfield>
   <subfield code="u">ARTIS, GRAVIR/IMAG-INRIA, 38334, Saint Ismier, France</subfield>
   <subfield code="4">aut</subfield>
  </datafield>
  <datafield tag="950" ind1=" " ind2=" ">
   <subfield code="B">NATIONALLICENCE</subfield>
   <subfield code="P">700</subfield>
   <subfield code="E">1-</subfield>
   <subfield code="a">Quan</subfield>
   <subfield code="D">Long</subfield>
   <subfield code="u">Department of Computer Science, Hong Kong University of Science and Technology, Hong Kong, SAR, China</subfield>
   <subfield code="4">aut</subfield>
  </datafield>
  <datafield tag="950" ind1=" " ind2=" ">
   <subfield code="B">NATIONALLICENCE</subfield>
   <subfield code="P">773</subfield>
   <subfield code="E">0-</subfield>
   <subfield code="t">International Journal of Computer Vision</subfield>
   <subfield code="d">Kluwer Academic Publishers</subfield>
   <subfield code="g">66/2(2006-02-01), 141-161</subfield>
   <subfield code="x">0920-5691</subfield>
   <subfield code="q">66:2&lt;141</subfield>
   <subfield code="1">2006</subfield>
   <subfield code="2">66</subfield>
   <subfield code="o">11263</subfield>
  </datafield>
  <datafield tag="900" ind1=" " ind2="7">
   <subfield code="a">Metadata rights reserved</subfield>
   <subfield code="b">Springer special CC-BY-NC licence</subfield>
   <subfield code="2">nationallicence</subfield>
  </datafield>
  <datafield tag="898" ind1=" " ind2=" ">
   <subfield code="a">BK010053</subfield>
   <subfield code="b">XK010053</subfield>
   <subfield code="c">XK010000</subfield>
  </datafield>
  <datafield tag="949" ind1=" " ind2=" ">
   <subfield code="B">NATIONALLICENCE</subfield>
   <subfield code="F">NATIONALLICENCE</subfield>
   <subfield code="b">NL-springer</subfield>
  </datafield>
 </record>
</collection>
