<?xml version="1.0" encoding="UTF-8"?>
<collection xmlns="http://www.loc.gov/MARC21/slim">
 <record>
  <leader>     caa a22        4500</leader>
  <controlfield tag="001">467914923</controlfield>
  <controlfield tag="003">CHVBK</controlfield>
  <controlfield tag="005">20180406152859.0</controlfield>
  <controlfield tag="007">cr unu---uuuuu</controlfield>
  <controlfield tag="008">170328e20060201xx      s     000 0 eng  </controlfield>
  <datafield tag="024" ind1="7" ind2="0">
   <subfield code="a">10.1007/s11263-005-3956-7</subfield>
   <subfield code="2">doi</subfield>
  </datafield>
  <datafield tag="035" ind1=" " ind2=" ">
   <subfield code="a">(NATIONALLICENCE)springer-10.1007/s11263-005-3956-7</subfield>
  </datafield>
  <datafield tag="245" ind1="0" ind2="0">
   <subfield code="a">Appearance-Cloning: Photo-Consistent Scene Recovery from Multi-View Images</subfield>
   <subfield code="h">[Elektronische Daten]</subfield>
   <subfield code="c">[Howon Kim, In Kweon]</subfield>
  </datafield>
  <datafield tag="520" ind1="3" ind2=" ">
   <subfield code="a">This paper introduces the novel volumetric methodology &quot;appearance-cloning&quot; as a viable solution for achieving a more improved photo-consistent scene recovery, including a greatly enhanced geometric recovery performance, from a set of photographs taken at arbitrarily distributed multiple camera viewpoints. We do so while solving many of the problems associated with previous stereo-based and volumetric methodologies. We redesign the photo-consistency decision problem of individual voxel in volumetric space as the photo-consistent shape search problem in image space, by generalizing the concept of the point correspondence search between two images in stereo-based approach, within a volumetric framework. In detail, we introduce a self-constrained greedy-style optimization methodology, which iteratively searches a more photo-consistent shape based on the probabilistic shape photo-consistency measure, by using the probabilistic competition between candidate shapes. Our new measure is designed to bring back the probabilistic photo-consistency of a shape by comparing the appearances captured from multiple cameras with those rendered from that shape using the per-pixel Maxwell model in image space. Through various scene recoveries experiments including specular and dynamic scenes, we demonstrate that if sufficient appearances are given enough to reflect scene characteristics, our appearance-cloning approach can successfully recover both the geometry and photometry information of a scene without any kind of scene-dependent algorithm tuning.</subfield>
  </datafield>
  <datafield tag="540" ind1=" " ind2=" ">
   <subfield code="a">Springer Science + Business Media, Inc., 2006</subfield>
  </datafield>
  <datafield tag="690" ind1=" " ind2="7">
   <subfield code="a">image-based modeling</subfield>
   <subfield code="2">nationallicence</subfield>
  </datafield>
  <datafield tag="690" ind1=" " ind2="7">
   <subfield code="a">volumetric reconstruction</subfield>
   <subfield code="2">nationallicence</subfield>
  </datafield>
  <datafield tag="690" ind1=" " ind2="7">
   <subfield code="a">voxel coloring</subfield>
   <subfield code="2">nationallicence</subfield>
  </datafield>
  <datafield tag="690" ind1=" " ind2="7">
   <subfield code="a">space carving</subfield>
   <subfield code="2">nationallicence</subfield>
  </datafield>
  <datafield tag="690" ind1=" " ind2="7">
   <subfield code="a">appearance</subfield>
   <subfield code="2">nationallicence</subfield>
  </datafield>
  <datafield tag="690" ind1=" " ind2="7">
   <subfield code="a">photo-consistency</subfield>
   <subfield code="2">nationallicence</subfield>
  </datafield>
  <datafield tag="690" ind1=" " ind2="7">
   <subfield code="a">color similarity</subfield>
   <subfield code="2">nationallicence</subfield>
  </datafield>
  <datafield tag="700" ind1="1" ind2=" ">
   <subfield code="a">Kim</subfield>
   <subfield code="D">Howon</subfield>
   <subfield code="u">Department of Electrical Engineering and Computer Science, Korea Advanced Institute of Science and Technology, 373-1 Guseong-dong, Yuseong-gu, Daejeon, Korea</subfield>
   <subfield code="4">aut</subfield>
  </datafield>
  <datafield tag="700" ind1="1" ind2=" ">
   <subfield code="a">Kweon</subfield>
   <subfield code="D">In</subfield>
   <subfield code="u">Department of Electrical Engineering and Computer Science, Korea Advanced Institute of Science and Technology, 373-1 Guseong-dong, Yuseong-gu, Daejeon, Korea</subfield>
   <subfield code="4">aut</subfield>
  </datafield>
  <datafield tag="773" ind1="0" ind2=" ">
   <subfield code="t">International Journal of Computer Vision</subfield>
   <subfield code="d">Kluwer Academic Publishers</subfield>
   <subfield code="g">66/2(2006-02-01), 163-192</subfield>
   <subfield code="x">0920-5691</subfield>
   <subfield code="q">66:2&lt;163</subfield>
   <subfield code="1">2006</subfield>
   <subfield code="2">66</subfield>
   <subfield code="o">11263</subfield>
  </datafield>
  <datafield tag="856" ind1="4" ind2="0">
   <subfield code="u">https://doi.org/10.1007/s11263-005-3956-7</subfield>
   <subfield code="q">text/html</subfield>
   <subfield code="z">Onlinezugriff via DOI</subfield>
  </datafield>
  <datafield tag="908" ind1=" " ind2=" ">
   <subfield code="D">1</subfield>
   <subfield code="a">research-article</subfield>
   <subfield code="2">jats</subfield>
  </datafield>
  <datafield tag="950" ind1=" " ind2=" ">
   <subfield code="B">NATIONALLICENCE</subfield>
   <subfield code="P">856</subfield>
   <subfield code="E">40</subfield>
   <subfield code="u">https://doi.org/10.1007/s11263-005-3956-7</subfield>
   <subfield code="q">text/html</subfield>
   <subfield code="z">Onlinezugriff via DOI</subfield>
  </datafield>
  <datafield tag="950" ind1=" " ind2=" ">
   <subfield code="B">NATIONALLICENCE</subfield>
   <subfield code="P">700</subfield>
   <subfield code="E">1-</subfield>
   <subfield code="a">Kim</subfield>
   <subfield code="D">Howon</subfield>
   <subfield code="u">Department of Electrical Engineering and Computer Science, Korea Advanced Institute of Science and Technology, 373-1 Guseong-dong, Yuseong-gu, Daejeon, Korea</subfield>
   <subfield code="4">aut</subfield>
  </datafield>
  <datafield tag="950" ind1=" " ind2=" ">
   <subfield code="B">NATIONALLICENCE</subfield>
   <subfield code="P">700</subfield>
   <subfield code="E">1-</subfield>
   <subfield code="a">Kweon</subfield>
   <subfield code="D">In</subfield>
   <subfield code="u">Department of Electrical Engineering and Computer Science, Korea Advanced Institute of Science and Technology, 373-1 Guseong-dong, Yuseong-gu, Daejeon, Korea</subfield>
   <subfield code="4">aut</subfield>
  </datafield>
  <datafield tag="950" ind1=" " ind2=" ">
   <subfield code="B">NATIONALLICENCE</subfield>
   <subfield code="P">773</subfield>
   <subfield code="E">0-</subfield>
   <subfield code="t">International Journal of Computer Vision</subfield>
   <subfield code="d">Kluwer Academic Publishers</subfield>
   <subfield code="g">66/2(2006-02-01), 163-192</subfield>
   <subfield code="x">0920-5691</subfield>
   <subfield code="q">66:2&lt;163</subfield>
   <subfield code="1">2006</subfield>
   <subfield code="2">66</subfield>
   <subfield code="o">11263</subfield>
  </datafield>
  <datafield tag="900" ind1=" " ind2="7">
   <subfield code="a">Metadata rights reserved</subfield>
   <subfield code="b">Springer special CC-BY-NC licence</subfield>
   <subfield code="2">nationallicence</subfield>
  </datafield>
  <datafield tag="898" ind1=" " ind2=" ">
   <subfield code="a">BK010053</subfield>
   <subfield code="b">XK010053</subfield>
   <subfield code="c">XK010000</subfield>
  </datafield>
  <datafield tag="949" ind1=" " ind2=" ">
   <subfield code="B">NATIONALLICENCE</subfield>
   <subfield code="F">NATIONALLICENCE</subfield>
   <subfield code="b">NL-springer</subfield>
  </datafield>
 </record>
</collection>
