aboutsummaryrefslogtreecommitdiffstats
path: root/src/classes/share
diff options
context:
space:
mode:
authorKevin Rushforth <[email protected]>2005-11-01 17:13:23 +0000
committerKevin Rushforth <[email protected]>2005-11-01 17:13:23 +0000
commit81ce3f9030a7e9704fcaece5d4b23238b4828bdc (patch)
treec957dfc209f2817c854985695a64eaa5f1013b41 /src/classes/share
parent775731d3206296faa81e01a07aab8681107c58fe (diff)
1. Finished inclusion of material from Specification Guide into javadoc spec. Added some links to new material.
2. Created a 1.4 ChangeLog git-svn-id: https://svn.java.net/svn/j3d-core~svn/trunk@456 ba19aa83-45c5-6ac9-afd3-db810772062c
Diffstat (limited to 'src/classes/share')
-rw-r--r--src/classes/share/javax/media/j3d/Behavior.java8
-rw-r--r--src/classes/share/javax/media/j3d/Canvas3D.java8
-rw-r--r--src/classes/share/javax/media/j3d/Locale.java6
-rw-r--r--src/classes/share/javax/media/j3d/Node.java5
-rw-r--r--src/classes/share/javax/media/j3d/NodeComponent.java4
-rw-r--r--src/classes/share/javax/media/j3d/SceneGraphObject.java4
-rw-r--r--src/classes/share/javax/media/j3d/Screen3D.java9
-rw-r--r--src/classes/share/javax/media/j3d/View.java8
-rw-r--r--src/classes/share/javax/media/j3d/VirtualUniverse.java6
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/Behaviors.html585
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/Behaviors1.gifbin0 -> 9067 bytes
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/Behaviors2.gifbin0 -> 2223 bytes
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/Behaviors3.gifbin0 -> 2189 bytes
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/Behaviors4.gifbin0 -> 2452 bytes
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/Behaviors5.gifbin0 -> 4743 bytes
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/Behaviors6.gifbin0 -> 4535 bytes
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/Behaviors7.gifbin0 -> 4463 bytes
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/Behaviors8.gifbin0 -> 22297 bytes
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/Concepts.html4
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/Immediate.html103
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/Immediate1.gifbin0 -> 17085 bytes
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/Rendering.html137
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/SceneGraphSharing.html239
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/SceneGraphSharing1.gifbin0 -> 22601 bytes
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/SceneGraphSharing2.gifbin0 -> 17159 bytes
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/SceneGraphSharing3.gifbin0 -> 13186 bytes
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/SceneGraphSharing4.gifbin0 -> 12419 bytes
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/SceneGraphSharing5.gifbin0 -> 10695 bytes
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/ViewModel.html1053
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/ViewModel1.gifbin0 -> 19585 bytes
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/ViewModel10.gifbin0 -> 29787 bytes
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/ViewModel11.gifbin0 -> 15569 bytes
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/ViewModel12.gifbin0 -> 15520 bytes
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/ViewModel13.gifbin0 -> 14982 bytes
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/ViewModel14.gifbin0 -> 11968 bytes
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/ViewModel2.gifbin0 -> 17085 bytes
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/ViewModel3.gifbin0 -> 22430 bytes
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/ViewModel4.gifbin0 -> 16502 bytes
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/ViewModel5.gifbin0 -> 9524 bytes
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/ViewModel6.gifbin0 -> 10590 bytes
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/ViewModel7.gifbin0 -> 10958 bytes
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/ViewModel8.gifbin0 -> 16583 bytes
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/ViewModel9.gifbin0 -> 14194 bytes
-rw-r--r--src/classes/share/javax/media/j3d/doc-files/intro.html7
44 files changed, 2176 insertions, 10 deletions
diff --git a/src/classes/share/javax/media/j3d/Behavior.java b/src/classes/share/javax/media/j3d/Behavior.java
index 6e00081..35e6c13 100644
--- a/src/classes/share/javax/media/j3d/Behavior.java
+++ b/src/classes/share/javax/media/j3d/Behavior.java
@@ -179,6 +179,14 @@ import java.util.Enumeration;
* instance of a Behavior. Sharing wakeup criteria among different
* instances of a Behavior is illegal.
*
+ * <p>
+ * <b>Additional Information</b>
+ * <p>
+ * For more information, see the
+ * <a href="doc-files/intro.html">Introduction to the Java 3D API</a> and
+ * <a href="doc-files/Behaviors.html">Behaviors and Interpolators</a>
+ * documents.
+ *
* @see WakeupCondition
*/
diff --git a/src/classes/share/javax/media/j3d/Canvas3D.java b/src/classes/share/javax/media/j3d/Canvas3D.java
index 6d887b9..c79b3c2 100644
--- a/src/classes/share/javax/media/j3d/Canvas3D.java
+++ b/src/classes/share/javax/media/j3d/Canvas3D.java
@@ -255,6 +255,14 @@ import java.util.*;
* serialize a Canvas3D object will result in an
* UnsupportedOperationException being thrown.
*
+ * <p>
+ * <b>Additional Information</b>
+ * <p>
+ * For more information, see the
+ * <a href="doc-files/intro.html">Introduction to the Java 3D API</a> and
+ * <a href="doc-files/ViewModel.html">View Model</a>
+ * documents.
+ *
* @see Screen3D
* @see View
* @see GraphicsContext3D
diff --git a/src/classes/share/javax/media/j3d/Locale.java b/src/classes/share/javax/media/j3d/Locale.java
index 157cbde..95a0bfb 100644
--- a/src/classes/share/javax/media/j3d/Locale.java
+++ b/src/classes/share/javax/media/j3d/Locale.java
@@ -29,6 +29,12 @@ import java.util.ArrayList;
* coordinates, and methods to add, remove, and enumerate the branch
* graphs.
*
+ * <p>
+ * For more information, see the
+ * <a href="doc-files/intro.html">Introduction to the Java 3D API</a> and
+ * <a href="doc-files/VirtualUniverse.html">Scene Graph Superstructure</a>
+ * documents.
+ *
* @see VirtualUniverse
* @see HiResCoord
* @see BranchGroup
diff --git a/src/classes/share/javax/media/j3d/Node.java b/src/classes/share/javax/media/j3d/Node.java
index 3853a63..eaf5d8e 100644
--- a/src/classes/share/javax/media/j3d/Node.java
+++ b/src/classes/share/javax/media/j3d/Node.java
@@ -20,6 +20,11 @@ import java.lang.reflect.Constructor;
* The Node class provides an abstract class for all Group and Leaf Nodes.
* It provides a common framework for constructing a Java 3D scene graph,
* specifically bounding volumes.
+ *
+ * <p>
+ * For more information, see the
+ * <a href="doc-files/intro.html">Introduction to the Java 3D API</a>.
+ *
* <p>
* NOTE: Applications should <i>not</i> extend this class directly.
*/
diff --git a/src/classes/share/javax/media/j3d/NodeComponent.java b/src/classes/share/javax/media/j3d/NodeComponent.java
index 9288d08..d40451d 100644
--- a/src/classes/share/javax/media/j3d/NodeComponent.java
+++ b/src/classes/share/javax/media/j3d/NodeComponent.java
@@ -16,6 +16,10 @@ import java.util.Hashtable;
/**
* NodeComponent is a common superclass for all scene graph node
* component objects such as: Geometry, Appearance, Material, Texture, etc.
+ *
+ * <p>
+ * For more information, see the
+ * <a href="doc-files/intro.html">Introduction to the Java 3D API</a>.
*/
public abstract class NodeComponent extends SceneGraphObject {
diff --git a/src/classes/share/javax/media/j3d/SceneGraphObject.java b/src/classes/share/javax/media/j3d/SceneGraphObject.java
index 32c52b8..ffcfed1 100644
--- a/src/classes/share/javax/media/j3d/SceneGraphObject.java
+++ b/src/classes/share/javax/media/j3d/SceneGraphObject.java
@@ -46,6 +46,10 @@ import java.util.Hashtable;
* <code>ENABLE_PICK_REPORTING</code> bits are not really capability bits,
* although they are set with the setCapability method. The default value
* for each of the <code>ENABLE_*_REPORTING bits</code> is false.
+ *
+ * <p>
+ * For more information, see the
+ * <a href="doc-files/intro.html">Introduction to the Java 3D API</a>.
*/
public abstract class SceneGraphObject extends Object {
// Any global flags? (e.g., execution cullable, collideable)
diff --git a/src/classes/share/javax/media/j3d/Screen3D.java b/src/classes/share/javax/media/j3d/Screen3D.java
index 4b07e58..4f6ce05 100644
--- a/src/classes/share/javax/media/j3d/Screen3D.java
+++ b/src/classes/share/javax/media/j3d/Screen3D.java
@@ -68,6 +68,15 @@ import java.util.Hashtable;
* specified (setHeadTrackerToLeftImagePlate and
* setHeadTrackerToRightImagePlate methods).</LI><P></UL>
* </UL><P>
+ *
+ * <p>
+ * <b>Additional Information</b>
+ * <p>
+ * For more information, see the
+ * <a href="doc-files/intro.html">Introduction to the Java 3D API</a> and
+ * <a href="doc-files/ViewModel.html">View Model</a>
+ * documents.
+ *
* @see Canvas3D
* @see Canvas3D#getScreen3D
*/
diff --git a/src/classes/share/javax/media/j3d/View.java b/src/classes/share/javax/media/j3d/View.java
index daf6cd4..ff6a964 100644
--- a/src/classes/share/javax/media/j3d/View.java
+++ b/src/classes/share/javax/media/j3d/View.java
@@ -389,6 +389,14 @@ import com.sun.j3d.utils.universe.*; // Needed for Support of DVR.
* the viewing frustum for the left and right eye.<P>
* </UL>
*
+ * <p>
+ * <b>Additional Information</b>
+ * <p>
+ * For more information, see the
+ * <a href="doc-files/intro.html">Introduction to the Java 3D API</a> and
+ * <a href="doc-files/ViewModel.html">View Model</a>
+ * documents.
+ *
* @see Canvas3D
* @see PhysicalBody
* @see PhysicalEnvironment
diff --git a/src/classes/share/javax/media/j3d/VirtualUniverse.java b/src/classes/share/javax/media/j3d/VirtualUniverse.java
index 20c783b..60ececd 100644
--- a/src/classes/share/javax/media/j3d/VirtualUniverse.java
+++ b/src/classes/share/javax/media/j3d/VirtualUniverse.java
@@ -33,6 +33,12 @@ import java.util.Map;
* A VirtualUniverse object defines methods to enumerate its Locale
* objects and to remove them from the virtual universe.
*
+ * <p>
+ * For more information, see the
+ * <a href="doc-files/intro.html">Introduction to the Java 3D API</a> and
+ * <a href="doc-files/VirtualUniverse.html">Scene Graph Superstructure</a>
+ * documents.
+ *
* @see Locale
*/
diff --git a/src/classes/share/javax/media/j3d/doc-files/Behaviors.html b/src/classes/share/javax/media/j3d/doc-files/Behaviors.html
index 62d6fdd..7bcc4a2 100644
--- a/src/classes/share/javax/media/j3d/doc-files/Behaviors.html
+++ b/src/classes/share/javax/media/j3d/doc-files/Behaviors.html
@@ -7,7 +7,590 @@
</head>
<body>
<h2>Behaviors and Interpolators</h2>
-<p><br>
+<p><a href="../Behavior.html">Behavior</a> nodes provide the means for
+animating objects, processing keyboard and mouse inputs, reacting to
+movement, and enabling and processing pick events. Behavior nodes
+contain Java code and state variables. A Behavior node's Java code can
+interact with Java objects, change node values within a Java&nbsp;3D
+scene
+graph, change the behavior's internal state-in general, perform any
+computation it wishes.
</p>
+<p>Simple behaviors can add surprisingly interesting effects to a scene
+graph. For example, one can animate a rigid object by using a Behavior
+node to repetitively modify the TransformGroup node that points to the
+object one wishes to animate. Alternatively, a Behavior node can track
+the current position of a mouse and modify portions of the scene graph
+in response.</p>
+<h2>Behavior Object</h2>
+<p>A Behavior leaf node object contains a scheduling region and two
+methods: an <code>initialize</code> method called once when the
+behavior becomes "live" and a <code>processStimulus</code>
+method called whenever appropriate by the Java&nbsp;3D behavior
+scheduler.
+The Behavior object also contains the state information needed by its <code>initialize</code>
+and <code>processStimulus</code> methods.
+</p>
+<p>The <em>scheduling region</em> defines a spatial volume that serves
+to enable the scheduling of Behavior nodes. A Behavior node is <em>active</em>
+(can receive stimuli) whenever an active ViewPlatform's activation
+volume intersects a Behavior object's scheduling region. Only active
+behaviors can receive stimuli.
+</p>
+<p>The <em>scheduling interval</em> defines a
+partial order of execution for behaviors that wake up in response to
+the same wakeup condition (that is, those behaviors that are processed
+at the same "time"). Given a set of behaviors whose wakeup conditions
+are satisfied at the same time, the behavior scheduler will execute all
+behaviors in a lower scheduling interval before executing any behavior
+in a higher scheduling interval. Within a scheduling interval,
+behaviors can be executed in any order, or in parallel. Note that this
+partial ordering is only guaranteed for those behaviors that wake up at
+the same time in response to the same wakeup condition, for example,
+the set of behaviors that wake up every frame in response to a
+WakeupOnElapsedFrames(0) wakeup condition.
+</p>
+<p>The <code>processStimulus</code> method receives and processes a
+behavior's ongoing messages. The Java&nbsp;3D behavior scheduler
+invokes a
+Behavior node's <code>processStimulus</code>
+method when an active ViewPlatform's activation volume intersects a
+Behavior object's scheduling region and all of that behavior's wakeup
+criteria are satisfied. The <code>processStimulus</code> method
+performs its computations and actions (possibly including the
+registration of state change information that could cause Java&nbsp;3D
+to
+wake other Behavior objects), establishes its next wakeup condition,
+and finally exits.
+</p>
+<p>A typical behavior will modify one or more nodes or node components
+in
+the scene graph. These modifications can happen in parallel with
+rendering. In general, applications cannot count on behavior execution
+being synchronized with rendering. There are two exceptions to this
+general rule:
+</p>
+<ul>
+ <li>All modifications to scene graph objects (not including geometry
+by-reference or texture by-reference) made from the <code>processStimulus</code>
+method of a single behavior instance are guaranteed to take effect in
+the same rendering frame</li>
+</ul>
+<ul>
+ <li>All modifications to scene graph objects (not including geometry
+by-reference or texture by-reference) made from the <code>processStimulus</code>
+methods of the set of behaviors that wake up in response to a
+WakeupOnElapsedFrames(0) wakeup condition are guaranteed to take effect
+in the same rendering frame.</li>
+</ul>
+<p>Note that modifications to geometry by-reference or texture
+by-reference are not guaranteed to show up in the same frame as other
+scene graph changes.
+</p>
+<h3>Code Structure</h3>
+<p>When the Java&nbsp;3D behavior scheduler invokes a Behavior object's
+<code>processStimulus</code>
+method, that method may perform any computation it wishes. Usually, it
+will change its internal state and specify its new wakeup conditions.
+Most probably, it will manipulate scene graph elements. However, the
+behavior code can change only those aspects of a scene graph element
+permitted by the capabilities associated with that scene graph element.
+A scene graph's capabilities restrict behavioral manipulation to those
+manipulations explicitly allowed.
+</p>
+<p>The application must provide the Behavior object with references to
+those scene graph elements that the Behavior object will manipulate.
+The application provides those references as arguments to the
+behavior's constructor when it creates the Behavior object.
+Alternatively, the Behavior object itself can obtain access to the
+relevant scene graph elements either when Java&nbsp;3D invokes its <code>initialize</code>
+method or each time Java&nbsp;3D invokes its <code>processStimulus</code>
+method.
+</p>
+<p>Behavior methods have a very rigid structure. Java&nbsp;3D assumes
+that
+they
+always run to completion (if needed, they can spawn threads). Each
+method's basic structure consists of the following:
+</p>
+<ul>
+ <li>Code to decode and extract references from the WakeupCondition
+enumeration that caused the object's awakening.</li>
+</ul>
+<ul>
+ <li>Code to perform the manipulations associated with the
+WakeupCondition.</li>
+</ul>
+<ul>
+ <li>Code to establish this behavior's new WakeupCondition.</li>
+</ul>
+<ul>
+ <li>A path to Exit (so that execution returns to the Java&nbsp;3D
+behavior
+scheduler).</li>
+</ul>
+<h3>WakeupCondition Object</h3>
+<p>A <a href="../WakeupCondition.html">WakeupCondition</a> object is
+an
+abstract class specialized to fourteen
+different WakeupCriterion objects and to four combining objects
+containing multiple WakeupCriterion objects.
+</p>
+<p>A Behavior node provides the Java&nbsp;3D behavior scheduler with a
+WakeupCondition object. When that object's WakeupCondition has been
+satisfied, the behavior scheduler hands that same WakeupCondition back
+to the Behavior via an enumeration.
+</p>
+<p>
+</p>
+<h3>WakeupCriterion Object</h3>
+<p>Java&nbsp;3D provides a rich set of wakeup criteria that Behavior
+objects
+can use in specifying a complex WakeupCondition. These wakeup criteria
+can cause Java&nbsp;3D's behavior scheduler to invoke a behavior's <code>processStimulus</code>
+method whenever
+</p>
+<ul>
+ <li>The center of an active ViewPlatform enters a specified region.</li>
+</ul>
+<ul>
+ <li>The center of an active ViewPlatform exits a specified region.</li>
+</ul>
+<ul>
+ <li>A behavior is activated.</li>
+</ul>
+<ul>
+ <li>A behavior is deactivated.</li>
+</ul>
+<ul>
+ <li>A specified TransformGroup node's transform changes.</li>
+</ul>
+<ul>
+ <li>Collision is detected between a specified Shape3D node's Geometry
+object and any other object.</li>
+</ul>
+<ul>
+ <li>Movement occurs between a specified Shape3D node's Geometry
+object and any other object with which it collides.</li>
+</ul>
+<ul>
+ <li>A specified Shape3D node's Geometry object no longer collides
+with any other object.</li>
+</ul>
+<ul>
+ <li>A specified Behavior object posts a specific event.</li>
+</ul>
+<ul>
+ <li>A specified AWT event occurs.</li>
+</ul>
+<ul>
+ <li>A specified time interval elapses.</li>
+</ul>
+<ul>
+ <li>A specified number of frames have been drawn.</li>
+</ul>
+<ul>
+ <li>The center of a specified Sensor enters a specified region.</li>
+</ul>
+<ul>
+ <li>The center of a specified Sensor exits a specified region.</li>
+</ul>
+<p>A Behavior object constructs a <a href="../WakeupCriterion.html">WakeupCriterion</a>
+by constructing the
+appropriate criterion object. The Behavior object must provide the
+appropriate arguments (usually a reference to some scene graph object
+and possibly a region of interest). Thus, to specify a
+WakeupOnViewPlatformEntry, a behavior would specify the region that
+will cause the behavior to execute if an active ViewPlatform enters it.
+</p>
+<h3>Composing WakeupCriterion
+Objects</h3>
+<p>A Behavior object can combine multiple WakeupCriterion objects into
+a
+more powerful, composite WakeupCondition. Java&nbsp;3D behaviors
+construct a
+composite WakeupCondition in one of the following ways:
+</p>
+<ul>
+ <li><a href="../WakeupAnd.html">WakeupAnd</a>: An array of
+WakeupCriterion objects ANDed together.</li>
+</ul>
+<pre> WakeupCriterion &amp;&amp; WakeupCriterion &amp;&amp; ...<br></pre>
+<ul>
+ <li><a href="../WakeupOr.html">WakeupOr</a>: An array of
+WakeupCriterion objects ORed together.</li>
+</ul>
+<pre> WakeupCriterion || WakeupCriterion || ...<br></pre>
+<ul>
+ <li><a href="../WakeupAndOfOrs.html">WakeupAndOfOrs</a>: An array of
+WakeupOr WakeupCondition objects that
+are then ANDed together.</li>
+</ul>
+<pre> WakeupOr &amp;&amp; WakeupOr &amp;&amp; ...<br></pre>
+<ul>
+ <li><a href="../WakeupOrOfAnds.html">WakeupOrOfAnds</a>: An array of
+WakeupAnd WakeupCondition objects
+that are then ORed together.</li>
+</ul>
+<pre> WakeupAnd || WakeupAnd || ...<br></pre>
+<h2>Composing Behaviors</h2>
+<p>Behavior objects can condition themselves to awaken only when
+signaled
+by another Behavior node. The <a href="../WakeupOnBehaviorPost.html">WakeupOnBehaviorPost</a>
+WakeupCriterion
+takes as arguments a reference to a Behavior node and an integer. These
+two arguments allow a behavior to limit its wakeup criterion to a
+specific post by a specific behavior.
+</p>
+<p>The WakeupOnBehaviorPost WakeupCriterion permits behaviors to chain
+their computations, allowing parenthetical computations-one behavior
+opens a door and the second closes the same door, or one behavior
+highlights an object and the second unhighlights the same object.
+</p>
+<p>
+</p>
+<h2>Scheduling</h2>
+<p>As a virtual universe grows large, Java&nbsp;3D must carefully
+husband
+its
+resources to ensure adequate performance. In a 10,000-object virtual
+universe with 400 or so Behavior nodes, a naive implementation of Java
+3D could easily end up consuming the majority of its compute cycles in
+executing the behaviors associated with the 400 Behavior objects before
+it draws a frame. In such a situation, the frame rate could easily drop
+to unacceptable levels.
+</p>
+<p>Behavior objects are usually associated with geometric objects in
+the
+virtual universe. In our example of 400 Behavior objects scattered
+throughout a 10,000-object virtual universe, only a few of these
+associated geometric objects would be visible at a given time. A
+sizable fraction of the Behavior nodes-those associated with nonvisible
+objects-need not be executed. Only those relatively few Behavior
+objects that are associated with visible objects must be executed.
+</p>
+<p>Java&nbsp;3D mitigates the problem of a large number of Behavior
+nodes in
+a
+high-population virtual universe through execution culling-choosing to
+invoke only those behaviors that have high relevance.
+</p>
+<p>Java&nbsp;3D requires each behavior to have a <em>scheduling region</em>
+and to post a wakeup condition. Together a behavior's scheduling region
+and wakeup condition provide Java&nbsp;3D's behavior scheduler with
+sufficient domain knowledge to selectively prune behavior invocations
+and invoke only those behaviors that absolutely need to be executed.
+</p>
+<p>
+</p>
+<h2>How Java&nbsp;3D Performs
+Execution Culling</h2>
+<p>Java&nbsp;3D finds all scheduling regions associated with Behavior
+nodes
+and
+constructs a scheduling/volume tree. It also creates an AND/OR tree
+containing all the Behavior node wakeup criteria. These two data
+structures provide the domain knowledge Java&nbsp;3D needs to prune
+unneeded
+behavior execution (to perform "execution triage").
+</p>
+<p>Java&nbsp;3D must track a behavior's wakeup conditions only if an
+active
+ViewPlatform object's activation volume intersects with that Behavior
+object's scheduling region. If the ViewPlatform object's activation
+volume does not intersect with a behavior's scheduling region,
+Java&nbsp;3D
+can safely ignore that behavior's wakeup criteria.
+</p>
+<p>In essence, the Java&nbsp;3D scheduler performs the following
+checks:
+</p>
+<ul>
+ <li>Find all Behavior objects with scheduling regions that intersect
+the active ViewPlatform object's activation volume.</li>
+</ul>
+<ul>
+ <li>For each Behavior object within the ViewPlatform's activation
+volume, if that behavior's WakeupCondition is <code>true</code>,
+schedule that Behavior object for execution.</li>
+</ul>
+<p>Java&nbsp;3D's behavior scheduler executes those Behavior objects
+that
+have
+been scheduled by calling the behavior's <code>processStimulus</code>
+method.
+</p>
+<h2>Interpolator Behaviors</h2>
+<p>This section describes Java&nbsp;3D's predefined <a
+ href="../Interpolator.html">Interpolator</a> behaviors.
+They are called <em>interpolators</em>
+because they smoothly interpolate between the two extreme values that
+an interpolator can produce. Interpolators perform simple behavioral
+acts, yet they provide broad functionality.
+</p>
+<p>The Java&nbsp;3D API provides interpolators for a number of
+functions:
+manipulating transforms within a TransformGroup, modifying the values
+of a Switch node, and modifying Material attributes such as color and
+transparency.
+</p>
+<p>These predefined Interpolator behaviors share the same mechanism for
+specifying and later for converting a temporal value into an alpha
+value. Interpolators consist of two portions: a generic portion that
+all interpolators share and a domain-specific portion.
+</p>
+<p>The generic portion maps time in milliseconds onto a value in the
+range
+[0.0, 1.0] inclusive. The domain-specific portion maps an alpha value
+in the range [0.0, 1.0] onto a value appropriate to the predefined
+behavior's range of outputs. An alpha value of 0.0 generates an
+interpolator's minimum value, an alpha value of 1.0 generates an
+interpolator's maximum value, and an alpha value somewhere in between
+generates a value proportionally in between the minimum and maximum
+values.
+</p>
+<h3>Mapping Time to Alpha</h3>
+<p>Several parameters control the mapping of time onto an alpha value
+(see
+the javadoc for the <a href="../Alpha.html">Alpha</a> object for a
+description of the API).
+That mapping is deterministic as long as its parameters do not change.
+Thus, two different interpolators with the same parameters will
+generate the same alpha value given the same time value. This means
+that two interpolators that do not communicate can still precisely
+coordinate their activities, even if they reside in different threads
+or even different processors-as long as those processors have
+consistent clocks.
+</p>
+<p><a href="#Figure_1">Figure
+1</a>
+shows the components of an interpolator's time-to-alpha mapping. Time
+is represented on the horizontal axis. Alpha is represented on the
+vertical axis. As we move from left to right, we see the alpha value
+start at 0.0, rise to 1.0, and then decline back to 0.0 on the
+right-hand side.
+</p>
+<p>On the left-hand side, the trigger time defines
+when this interpolator's waveform begins in milliseconds. The region
+directly to the right of the trigger time, labeled Phase Delay, defines
+a time period where the waveform does not change. During phase delays
+alpha is either 0 or 1, depending on which region it precedes.
+</p>
+<p>Phase delays provide an important means for offsetting multiple
+interpolators from one another, especially where the interpolators have
+all the same parameters. The next four regions, labeled <b>&#945;</b>
+increasing, <b>&#945;</b> at 1, <b>&#945;</b> decreasing, and
+<b>&#945;</b> at 0, all specify durations for
+the corresponding values
+of alpha.
+</p>
+<p>Interpolators have a loop count that determines how many times to
+repeat the sequence of alpha increasing, alpha at 1, alpha decreasing,
+and alpha at 0; they also have associated mode flags that enable either
+the increasing or decreasing portions, or both, of the waveform.
+</p>
+<p><a name="Figure_1"></a><img style="width: 500px; height: 141px;"
+ alt="Time-to-Alpha Mapping" title="Time-to-Alpha Mapping"
+ src="Behaviors1.gif">
+</p>
+<p>
+</p>
+<ul>
+ <font size="-1"><b><i>Figure 1</i> &#8211; An Interpolator's Generic
+Time-to-Alpha Mapping Sequence</b></font>
+</ul>
+<p>
+Developers can use the loop count in conjunction with the mode flags to
+generate various kinds of actions. Specifying a loop count of 1 and
+enabling the mode flag for only the alpha-increasing and alpha-at-1
+portion of the waveform, we would get the waveform shown in <a
+ href="#Figure_2">Figure
+2</a>.
+</p>
+<p><a name="Figure_2"></a><img style="width: 241px; height: 100px;"
+ alt="Alpha Increasing" title="Alpha Increasing" src="Behaviors2.gif">
+</p>
+<p>
+</p>
+<ul>
+ <font size="-1"><b><i>Figure 2</i> &#8211; An Interpolator Set to a Loop
+Count of 1 with Mode Flags Set to Enable
+Only the Alpha-Increasing and Alpha-at-1 Portion of the Waveform</b></font>
+</ul>
+<p>
+In <a href="#Figure_2">Figure
+2</a>,
+the alpha value is 0 before the combination of trigger time plus the
+phase delay duration. The alpha value changes from 0 to 1 over a
+specified interval of time, and thereafter the alpha value remains 1
+(subject to the reprogramming of the interpolator's parameters). A
+possible use of a single alpha-increasing value might be to combine it
+with a rotation interpolator to program a door opening.
+</p>
+<p>Similarly, by specifying a loop count of 1 and
+a mode flag that enables only the alpha-decreasing and alpha-at-0
+portion of the waveform, we would get the waveform shown in <a
+ href="#Figure_3">Figure
+3</a>.
+</p>
+<p>In <a href="#Figure_3">Figure
+3</a>,
+the alpha value is 1 before the combination of trigger time plus the
+phase delay duration. The alpha value changes from 1 to 0 over a
+specified interval; thereafter the alpha value remains 0 (subject to
+the reprogramming of the interpolator's parameters). A possible use of
+a single <b>&#945;</b>-decreasing value might be to combine it with a
+rotation
+interpolator to program a door closing.
+</p>
+<p><a name="Figure_3"></a><img style="width: 241px; height: 88px;"
+ alt="Alpha Decreasing" title="Alpha Decreasing" src="Behaviors3.gif">
+</p>
+<p>
+</p>
+<ul>
+ <font size="-1"><b><i>Figure 3</i> &#8211; An Interpolator Set to a Loop
+Count of 1 with Mode Flags Set to Enable
+Only the Alpha-Decreasing and Alpha-at-0 Portion of the Waveform</b></font>
+</ul>
+<p>
+We can combine both of the above waveforms by specifying a loop count
+of 1 and setting the mode flag to enable both the alpha-increasing and
+alpha-at-1 portion of the waveform as well as the alpha-decreasing and
+alpha-at-0 portion of the waveform. This combination would result in
+the waveform shown in <a href="#Figure_4">Figure
+4</a>.
+</p>
+<p><a name="Figure_4"></a><img style="width: 241px; height: 100px;"
+ alt="Alpha Increasing &amp; Decreasing"
+ title="Alpha Increasing &amp; Decreasing" src="Behaviors4.gif">
+</p>
+<p>
+</p>
+<ul>
+ <font size="-1"><b><i>Figure 4</i> &#8211; An Interpolator Set to a Loop
+Count of 1 with Mode Flags
+Set to Enable All Portions of the Waveform</b></font>
+</ul>
+<p>
+In <a href="#Figure_4">Figure
+4</a>,
+the alpha value is 0 before the combination of trigger time plus the
+phase delay duration. The alpha value changes from 0 to 1 over a
+specified period of time, remains at 1 for another specified period of
+time, then changes from 1 to 0 over a third specified period of time;
+thereafter the alpha value remains 0 (subject to the reprogramming of
+the interpolator's parameters). A possible use of an alpha-increasing
+value followed by an alpha-decreasing value might be to combine it with
+a rotation interpolator to program a door swinging open and then
+closing.
+</p>
+<p>By increasing the loop count, we can get
+repetitive behavior, such as a door swinging open and closed some
+number of times. At the extreme, we can specify a loop count of -1
+(representing infinity).
+</p>
+<p>We can construct looped versions of the waveforms shown in <a
+ href="#Figure_2">Figure
+2</a>, <a href="#Figure_3">Figure
+3</a>, and <a href="#Figure_4">Figure
+4</a>. <a href="#Figure_5">Figure
+5</a> shows a looping interpolator with mode flags set to enable
+only the alpha-increasing and alpha-at-1 portion of the waveform.
+</p>
+<p><a name="Figure_5"></a><img style="width: 500px; height: 99px;"
+ alt="Alpha Increasing Infinite Loop"
+ title="Alpha Increasing Infinite Loop" src="Behaviors5.gif">
+</p>
+<p>
+</p>
+<ul>
+ <font size="-1"><b><i>Figure 5</i> &#8211; An Interpolator Set to Loop
+Infinitely and Mode Flags Set to Enable
+Only the Alpha-Increasing and Alpha-at-1 Portion of the Waveform</b></font>
+</ul>
+<p>
+In <a href="#Figure_5">Figure
+5</a>, alpha goes from 0 to 1 over a fixed duration of time, stays
+at 1 for another fixed duration of time, and then repeats.
+</p>
+<p>Similarly, <a href="#Figure_6">Figure
+6</a> shows a looping interpolator with mode flags set to enable
+only the alpha-decreasing and alpha-at-0 portion of the waveform.
+</p>
+<p><a name="Figure_6"></a><img style="width: 500px; height: 97px;"
+ alt="Alpha Decreasing Infinite Loop"
+ title="Alpha Decreasing Infinite Loop" src="Behaviors6.gif">
+</p>
+<p>
+</p>
+<ul>
+ <font size="-1"><b><i>Figure 6</i> &#8211; An Interpolator Set to Loop
+Infinitely and Mode Flags Set to Enable
+Only the Alpha-Decreasing and Alpha-at-0 Portion of the Waveform</b></font>
+</ul>
+<p>
+Finally, <a href="#Figure_7">Figure
+7</a> shows a looping interpolator with both the increasing and
+decreasing portions of the waveform enabled.
+</p>
+<p>In all three cases shown by <a href="#Figure_5">Figure
+5</a>, <a href="#Figure_6">Figure
+6</a>, and <a href="#Figure_7">Figure
+7</a>, we can compute the exact value of alpha at any point in time.
+</p>
+<p><a name="Figure_7"></a><img style="width: 500px; height: 99px;"
+ alt="Alpha Increasing &amp; Decreasing Infinite Loop"
+ title="Alpha Increasing &amp; Decreasing Infinite Loop"
+ src="Behaviors7.gif">
+</p>
+<p>
+</p>
+<ul>
+ <font size="-1"><b><i>Figure 7</i> &#8211; An Interpolator Set to Loop
+Infinitely and Mode Flags Set
+to Enable All Portions of the Waveform</b></font>
+</ul>
+<p>
+Java&nbsp;3D's preprogrammed behaviors permit other behaviors to change
+their parameters. When such a change occurs, the alpha value changes to
+match the state of the newly parameterized interpolator.
+</p>
+<h3>Acceleration of Alpha</h3>
+<p>Commonly, developers want alpha to change slowly at first and then
+to
+speed up until the change in alpha reaches some appropriate rate. This
+is analogous to accelerating your car up to the speed limit&#8212;it does not
+start off immediately at the speed limit. Developers specify this
+"ease-in, ease-out" behavior through two additional parameters, the <code>increasingAlphaRampDuration</code>
+and the <code>decreasingAlphaRampDuration</code>.
+</p>
+<p>Each of these parameters specifies a period within the increasing or
+decreasing alpha duration region during which the "change in alpha" is
+accelerated (until it reaches its maximum per-unit-of-time step size)
+and then symmetrically decelerated. <a href="#Figure_8">Figure
+8</a> shows three general examples of how the <code>increasingAlphaRampDuration</code>
+method can be used to modify the alpha waveform. A value of 0 for the
+increasing ramp duration implies that <b>&#945;</b>
+is not accelerated; it changes at a constant rate. A value of 0.5 or
+greater (clamped to 0.5) for this increasing ramp duration implies that
+the change in <b>&#945;</b> is accelerated during the first half of the
+period and
+then decelerated during the second half of the period. For a value of <em>n</em>
+that is less than 0.5, alpha is accelerated for duration <em>n</em>,
+held constant for duration (1.0 - 2<em>n</em>), then decelerated for
+duration <em>n</em> of the period.
+</p>
+<p><a name="Figure_8"></a><img style="width: 500px; height: 354px;"
+ alt="Alpha acceleration" title="Alpha acceleration"
+ src="Behaviors8.gif">
+</p>
+<p>
+</p>
+<ul>
+ <font size="-1"><b><i>Figure 8</i> &#8211; How an Alpha-Increasing Waveform
+Changes with Various
+Values of increasingAlphaRampDuration</b></font>
+</ul>
</body>
</html>
diff --git a/src/classes/share/javax/media/j3d/doc-files/Behaviors1.gif b/src/classes/share/javax/media/j3d/doc-files/Behaviors1.gif
new file mode 100644
index 0000000..bb288ce
--- /dev/null
+++ b/src/classes/share/javax/media/j3d/doc-files/Behaviors1.gif
Binary files differ
diff --git a/src/classes/share/javax/media/j3d/doc-files/Behaviors2.gif b/src/classes/share/javax/media/j3d/doc-files/Behaviors2.gif
new file mode 100644
index 0000000..005564f
--- /dev/null
+++ b/src/classes/share/javax/media/j3d/doc-files/Behaviors2.gif
Binary files differ
diff --git a/src/classes/share/javax/media/j3d/doc-files/Behaviors3.gif b/src/classes/share/javax/media/j3d/doc-files/Behaviors3.gif
new file mode 100644
index 0000000..a8beb09
--- /dev/null
+++ b/src/classes/share/javax/media/j3d/doc-files/Behaviors3.gif
Binary files differ
diff --git a/src/classes/share/javax/media/j3d/doc-files/Behaviors4.gif b/src/classes/share/javax/media/j3d/doc-files/Behaviors4.gif
new file mode 100644
index 0000000..685bcb7
--- /dev/null
+++ b/src/classes/share/javax/media/j3d/doc-files/Behaviors4.gif
Binary files differ
diff --git a/src/classes/share/javax/media/j3d/doc-files/Behaviors5.gif b/src/classes/share/javax/media/j3d/doc-files/Behaviors5.gif
new file mode 100644
index 0000000..74783fb
--- /dev/null
+++ b/src/classes/share/javax/media/j3d/doc-files/Behaviors5.gif
Binary files differ
diff --git a/src/classes/share/javax/media/j3d/doc-files/Behaviors6.gif b/src/classes/share/javax/media/j3d/doc-files/Behaviors6.gif
new file mode 100644
index 0000000..8614a4e
--- /dev/null
+++ b/src/classes/share/javax/media/j3d/doc-files/Behaviors6.gif
Binary files differ
diff --git a/src/classes/share/javax/media/j3d/doc-files/Behaviors7.gif b/src/classes/share/javax/media/j3d/doc-files/Behaviors7.gif
new file mode 100644
index 0000000..0f2ce48
--- /dev/null
+++ b/src/classes/share/javax/media/j3d/doc-files/Behaviors7.gif
Binary files differ
diff --git a/src/classes/share/javax/media/j3d/doc-files/Behaviors8.gif b/src/classes/share/javax/media/j3d/doc-files/Behaviors8.gif
new file mode 100644
index 0000000..d048cfa
--- /dev/null
+++ b/src/classes/share/javax/media/j3d/doc-files/Behaviors8.gif
Binary files differ
diff --git a/src/classes/share/javax/media/j3d/doc-files/Concepts.html b/src/classes/share/javax/media/j3d/doc-files/Concepts.html
index 1e0d389..7b005af 100644
--- a/src/classes/share/javax/media/j3d/doc-files/Concepts.html
+++ b/src/classes/share/javax/media/j3d/doc-files/Concepts.html
@@ -271,8 +271,8 @@ component becomes live or is compiled. It is best to set capabilities
when you build your content. <a href="#Listing_4">Listing 4</a> shows
an example where we create a TransformGroup node and
enable it for writing.
-<p><font size="-1"><b><i><a name="Listing_4"></a>Listing 4</i> </b></font><font
- size="-1"><b>&#8211; C</b></font><font size="-1"><b>apabilities Example
+<p><font size="-1"><b><i><a name="Listing_4"></a>Listing 4</i> &#8211;
+Capabilities Example
</b></font></p>
<hr>
<pre>TransformGroup myTrans = new TransformGroup();<br>myTrans.setCapability(Transform.ALLOW_TRANSFORM_WRITE);<br></pre>
diff --git a/src/classes/share/javax/media/j3d/doc-files/Immediate.html b/src/classes/share/javax/media/j3d/doc-files/Immediate.html
index c6f27f4..101fe22 100644
--- a/src/classes/share/javax/media/j3d/doc-files/Immediate.html
+++ b/src/classes/share/javax/media/j3d/doc-files/Immediate.html
@@ -7,7 +7,108 @@
</head>
<body>
<h2>Immediate-Mode Rendering</h2>
-<p><br>
+<p>Java&nbsp;3D is fundamentally a scene graph-based API. Most of
+the constructs in the API are biased toward retained mode and
+compiled-retained mode rendering. However, there are some applications
+that want both the control and the flexibility that immediate-mode
+rendering offers.
</p>
+<p>Immediate-mode applications can either use or ignore Java&nbsp;3D's
+scene
+graph structure. By using immediate mode, end-user applications have
+more freedom, but this freedom comes at the expense of performance. In
+immediate mode, Java&nbsp;3D has no high-level information concerning
+graphical objects or their composition. Because it has minimal global
+knowledge, Java&nbsp;3D can perform only localized optimizations on
+behalf
+of the application programmer.
+</p>
+<p>
+</p>
+<h2>Two Styles of Immediate-Mode
+Rendering</h2>
+Use of Java&nbsp;3D's immediate mode falls into one of two categories:
+pure
+immediate-mode rendering and mixed-mode rendering in which immediate
+mode and retained or compiled-retained mode interoperate and render to
+the same canvas. The Java&nbsp;3D renderer is idle in pure immediate
+mode,
+distinguishing it from mixed-mode rendering.
+<h3>Pure Immediate-Mode
+Rendering</h3>
+Pure immediate-mode rendering provides for those applications and
+applets that do not want Java&nbsp;3D to do any automatic rendering of
+the
+scene graph. Such applications may not even wish to build a scene graph
+to represent their graphical data. However, they use Java&nbsp;3D's
+attribute objects to set graphics state and Java&nbsp;3D's geometric
+objects
+to render geometry.
+<hr noshade="noshade"><b>Note:</b> Scene antialiasing is not supported
+in pure immediate mode.
+<hr noshade="noshade">A pure immediate mode application must create a
+minimal set of Java&nbsp;3D
+objects before rendering. In addition to a Canvas3D object, the
+application must create a View object, with its associated PhysicalBody
+and PhysicalEnvironment objects, and the following scene graph
+elements: a VirtualUniverse object, a high-resolution Locale object, a
+BranchGroup node object, a TransformGroup node object with associated
+transform, and, finally, a ViewPlatform leaf node object that defines
+the position and orientation within the virtual universe that generates
+the view (see <a href="#Figure_1">Figure
+1</a>).
+<p><a name="Figure_1"></a><img style="width: 500px; height: 359px;"
+ alt="Minimal Immediate-Mode Structure"
+ title="Minimal Immediate-Mode Structure" src="Immediate1.gif"></p>
+<p>
+</p>
+<ul>
+ <font size="-1"><b><i>Figure 1</i> &#8211; Minimal Immediate-Mode Structure</b></font>
+</ul>
+<p>
+Java&nbsp;3D provides utility functions that create much of this
+structure
+on behalf of a pure immediate-mode application, making it less
+noticeable from the application's perspective&#8212;but the structure must
+exist.
+</p>
+<p>All rendering is done completely under user control. It is necessary
+for the user to clear the 3D canvas, render all geometry, and swap the
+buffers. Additionally, rendering the right and left eye for stereo
+viewing becomes the sole responsibility of the application.
+</p>
+<p>In pure immediate mode, the user must stop the Java&nbsp;3D
+renderer, via
+the Canvas3D object <code>stopRenderer()</code>
+method, prior to adding the Canvas3D object to an active View object
+(that is, one that is attached to a live ViewPlatform object).
+</p>
+<p>
+</p>
+<h3>Mixed-Mode Rendering</h3>
+Mixing immediate mode and retained or compiled-retained mode requires
+more structure than pure immediate mode. In mixed mode, the
+Java&nbsp;3D
+renderer is running continuously, rendering the scene graph into the
+canvas.
+<p>The basic Java&nbsp;3D <em>stereo</em> rendering loop, executed for
+each
+Canvas3D, is as follows:
+</p>
+<pre><hr><br>clear canvas (both eyes)<br></pre>
+<pre>call preRender() <strong><kbd>// user-supplied method<br></kbd></strong>set left eye view<br>render opaque scene graph objects<br>call renderField(FIELD_LEFT) <strong><kbd>// user-supplied method<br></kbd></strong>render transparent scene graph objects<br>set right eye view<br>render opaque scene graph objects again<br>call renderField(FIELD_RIGHT) <strong><kbd>// user-supplied method<br></kbd></strong>render transparent scene graph objects again<br>call postRender() <strong><kbd>// user-supplied method<br></kbd></strong>synchronize and swap buffers<br></pre>
+<pre>call postSwap() <strong><kbd>// user-supplied method<br></kbd></strong><br><hr></pre>
+The basic Java&nbsp;3D <em>monoscopic</em> rendering loop is as
+follows:
+<pre><hr><br>clear canvas<br></pre>
+<pre>call preRender() <strong><kbd>// user-supplied method<br></kbd></strong>set view<br>render opaque scene graph objects<br>call renderField(FIELD_ALL) <strong><kbd>// user-supplied method<br></kbd></strong>render transparent scene graph objects<br>call postRender() <strong><kbd>// user-supplied method<br></kbd></strong>synchronize and swap buffers<br></pre>
+<pre>call postSwap() <strong><kbd>// user-supplied method<br></kbd></strong><br><hr></pre>
+In both cases, the entire loop, beginning with clearing the canvas and
+ending with swapping the buffers, defines a frame. The application is
+given the opportunity to render immediate-mode geometry at any of the
+clearly identified spots in the rendering loop. A user specifies his or
+her own rendering methods by extending the Canvas3D class and
+overriding the <code>preRender</code>, <code>postRender</code>, <code>postSwap</code>,
+and/or <code>renderField</code> methods.
</body>
</html>
diff --git a/src/classes/share/javax/media/j3d/doc-files/Immediate1.gif b/src/classes/share/javax/media/j3d/doc-files/Immediate1.gif
new file mode 100644
index 0000000..2d549b1
--- /dev/null
+++ b/src/classes/share/javax/media/j3d/doc-files/Immediate1.gif
Binary files differ
diff --git a/src/classes/share/javax/media/j3d/doc-files/Rendering.html b/src/classes/share/javax/media/j3d/doc-files/Rendering.html
index a38f6eb..7415ce8 100644
--- a/src/classes/share/javax/media/j3d/doc-files/Rendering.html
+++ b/src/classes/share/javax/media/j3d/doc-files/Rendering.html
@@ -7,7 +7,142 @@
</head>
<body>
<h2>Execution and Rendering Model</h2>
-<p><br>
+<p>Java&nbsp;3D's execution and rendering model assumes the
+existence of a <a href="../VirtualUniverse.html">VirtualUniverse</a>
+object and an attached scene graph. This
+scene graph can be minimal and not noticeable from an application's
+perspective when using immediate-mode rendering, but it must exist.
+</p>
+<p>Java&nbsp;3D's execution model intertwines with its rendering modes
+and
+with
+behaviors and their scheduling. This chapter first describes the three
+rendering modes, then describes how an application starts up a
+Java&nbsp;3D
+environment, and finally it discusses how the various rendering modes
+work within this framework.
+</p>
+<p>
+</p>
+<h2>Three Major Rendering Modes</h2>
+<p>Java&nbsp;3D supports three different modes for rendering scenes:
+immediate
+mode, retained mode, and compiled-retained mode. These three levels of
+API support represent a potentially large variation in graphics
+processing speed and in on-the-fly restructuring.
+</p>
+<p></p>
+<h3>Immediate Mode</h3>
+<p>Immediate mode allows maximum flexibility at some cost in rendering
+speed. The application programmer can either use or ignore the scene
+graph structure inherent in Java&nbsp;3D's design. The programmer can
+choose
+to draw geometry directly or to define a scene graph. Immediate mode
+can be either used independently or mixed with retained and/or
+compiled-retained mode rendering. The immediate-mode API is described
+in the "<a href="Immediate.html">Immediate-Mode Rendering</a>" section.</p>
+<p>
+</p>
+<h3>Retained Mode</h3>
+<p>Retained mode allows a great deal of the flexibility provided by
+immediate mode while also providing a substantial increase in rendering
+speed. All objects defined in the scene graph are accessible and
+manipulable. The scene graph itself is fully manipulable. The
+application programmer can rapidly construct the scene graph, create
+and delete nodes, and instantly "see" the effect of edits. Retained
+mode also allows maximal access to objects through a general pick
+capability.
+</p>
+<p>Java&nbsp;3D's retained mode allows a programmer to construct
+objects,
+insert objects into a database, compose objects, and add behaviors to
+objects.
+</p>
+<p>In retained mode, Java&nbsp;3D knows that the programmer has defined
+objects, knows how the programmer has combined those objects into
+compound objects or scene graphs, and knows what behaviors or actions
+the programmer has attached to objects in the database. This knowledge
+allows Java&nbsp;3D to perform many optimizations. It can construct
+specialized data structures that hold an object's geometry in a manner
+that enhances the speed at which the Java&nbsp;3D system can render it.
+It
+can compile object behaviors so that they run at maximum speed when
+invoked. It can flatten transformation manipulations and state changes
+where possible in the scene graph.
+</p>
+<p>
+</p>
+<h3>Compiled-Retained Mode</h3>
+<p>Compiled-retained mode allows the Java&nbsp;3D API to perform an
+arbitrarily
+complex series of optimizations including, but not restricted to,
+geometry compression, scene graph flattening, geometry grouping, and
+state change clustering.
+</p>
+<p>Compiled-retained mode provides hooks for end-user manipulation and
+picking. Pick operations return the closest object (in scene graph
+space) associated with the picked geometry.
+</p>
+<p>Java&nbsp;3D's compiled-retained mode ensures effective graphics
+rendering
+speed in yet one more way. A programmer can request that Java&nbsp;3D
+compile an object or a scene graph. Once it is compiled, the programmer
+has minimal access to the internal structure of the object or scene
+graph. Capability flags provide access to specified components that the
+application program may need to modify on a continuing basis.
+</p>
+<p>A compiled object or scene graph consists of whatever internal
+structures Java&nbsp;3D wishes to create to ensure that objects or
+scene
+graphs render at maximal rates. Because Java&nbsp;3D knows that the
+majority
+of the compiled object's or scene graph's components will not change,
+it can perform an extraordinary number of optimizations, including the
+fusing of multiple objects into one conceptual object, turning an
+object into compressed geometry or even breaking an object up into
+like-kind components and reassembling the like-kind components into new
+"conceptual objects."
+</p>
+<p>
+</p>
+<h2>Instantiating the Render Loop</h2>
+<p>From an application's perspective, Java&nbsp;3D's render loop runs
+continuously. Whenever an application adds a scene branch to the
+virtual world, that scene branch is instantly visible. This high-level
+view of the render loop permits concurrent implementations of
+Java&nbsp;3D
+as well as serial implementations. The remainder of this section
+describes the Java&nbsp;3D render loop bootstrap process from a
+serialized
+perspective. Differences that would appear in concurrent
+implementations are noted as well.
+</p>
+<p></p>
+<h3>An Application-Level
+Perspective</h3>
+<p>First the application must construct its scene graphs. It does this
+by
+constructing scene graph nodes and component objects and linking them
+into self-contained trees with a BranchGroup node as a root. The
+application next must obtain a reference to any constituent nodes or
+objects within that branch that it may wish to manipulate. It sets the
+capabilities of all the objects to match their anticipated use and only
+then compiles the branch using the BranchGroup's <code>compile</code>
+method. Whether or not it compiles the branch, the application can add it to
+the virtual universe by adding the BranchGroup to a Locale object. The
+application repeats this process for each branch it wishes to create.
+Note that for concurrent Java&nbsp;3D implementations, whenever an
+application adds a branch to the active virtual universe, that branch
+becomes visible.
+</p>
+<p></p>
+<h3>Retained and
+Compiled-Retained Rendering Modes</h3>
+<p>This initialization process is identical for retained and
+compiled-retained modes. In both modes, the application builds a scene
+graph. In compiled-retained mode, the application compiles the scene
+graph. Then the application inserts the (possibly compiled) scene graph
+into the virtual universe.
</p>
</body>
</html>
diff --git a/src/classes/share/javax/media/j3d/doc-files/SceneGraphSharing.html b/src/classes/share/javax/media/j3d/doc-files/SceneGraphSharing.html
index 478ea4d..ff80cb4 100644
--- a/src/classes/share/javax/media/j3d/doc-files/SceneGraphSharing.html
+++ b/src/classes/share/javax/media/j3d/doc-files/SceneGraphSharing.html
@@ -7,7 +7,244 @@
</head>
<body>
<h2>Reusing Scene Graphs</h2>
-<p><br>
+<p>
+Java&nbsp;3D provides application programmers
+with two different means for reusing scene graphs. First, multiple
+scene graphs can share a common subgraph. Second, the node hierarchy of
+a common subgraph can be cloned, while still sharing large component
+objects such as geometry and texture objects. In the first case,
+changes in the shared subgraph affect all scene graphs that refer to
+the shared subgraph. In the second case, each instance is unique&#8212;a
+change in one instance does not affect any other instance.
</p>
+<h2>Sharing Subgraphs</h2>
+<p>An application that wishes to share a subgraph from multiple places
+in
+a scene graph must do so through the use of the <a href="../Link.html">Link</a>
+leaf node and an
+associated <a href="../SharedGroup.html">SharedGroup</a> node. The
+SharedGroup node serves as the root of
+the shared subgraph. The Link leaf node refers to the SharedGroup node.
+It does not incorporate the shared scene graph directly into its scene
+graph.
+</p>
+<p>A SharedGroup node allows multiple Link leaf nodes to share its
+subgraph as shown in <a href="#Figure_1">Figure
+1</a> below.<br>
+</p>
+<p><a name="Figure_1"></a><img style="width: 500px; height: 476px;"
+ alt="Sharing a Subgraph" title="Sharing a Subgraph"
+ src="SceneGraphSharing1.gif">
+</p>
+<ul>
+ <font size="-1"><b><i>Figure 1</i> &#8211; Sharing a Subgraph</b></font>
+</ul>
+<h2>Cloning Subgraphs</h2>
+<p>An application developer may wish to reuse a common subgraph without
+completely sharing that subgraph. For example, the developer may wish
+to create a parking lot scene consisting of multiple cars, each with a
+different color. The developer might define three basic types of cars,
+such as convertible, truck, and sedan. To create the parking lot scene,
+the application will instantiate each type of car several times. Then
+the application can change the color of the various instances to create
+more variety in the scene. Unlike shared subgraphs, each instance is a
+separate copy of the scene graph definition: Changes to one instance do
+not affect any other instance.
+</p>
+<p>Java&nbsp;3D provides the <a href="../Node.html#cloneTree%28%29"><code>cloneTree</code></a>
+method for this
+purpose. The <code>cloneTree</code>
+method allows the programmer to change some attributes (NodeComponent
+objects) in a scene graph, while at the same time sharing the majority
+of the scene graph data&#8212;the geometry.
+</p>
+<h3>References to Node Component
+Objects</h3>
+<p>When <code>cloneTree</code> reaches a leaf node,
+there are two possible actions for handling the leaf node's
+NodeComponent objects (such as Material, Texture, and so forth). First,
+the cloned leaf node can reference the original leaf node's
+NodeComponent object-the NodeComponent object itself is not duplicated.
+Since the cloned leaf node shares the NodeComponent object with the
+original leaf node, changing the data in the NodeComponent object will
+effect a change in both nodes. This mode would also be used for objects
+that are read-only at run time.
+</p>
+<p>Alternatively, the NodeComponent object can be duplicated, in which
+case the new leaf node would reference the duplicated object. This mode
+allows data referenced by the newly created leaf node to be modified
+without that modification affecting the original leaf node.
+</p>
+<p><a href="#Figure_2">Figure
+2</a>
+shows two instances of NodeComponent objects that are shared and one
+NodeComponent element that is duplicated for the cloned subgraph.
+</p>
+<p><a name="Figure_2"></a><img style="width: 499px; height: 287px;"
+ alt="Referenced and Duplicated NodeComponent Objects"
+ title="Referenced / Duplicated NodeComponens"
+ src="SceneGraphSharing2.gif">
+</p>
+<p>
+</p>
+<ul>
+ <font size="-1"><b><i>Figure 2</i> &#8211; Referenced and Duplicated
+NodeComponent Objects</b></font>
+</ul>
+<h3>References to Other Scene
+Graph Nodes</h3>
+Leaf nodes that contain references to other nodes
+(for example, Light nodes reference a Group node) can create a problem
+for the <code>cloneTree</code> method. After the <code>cloneTree</code>
+operation is performed, the reference in the cloned leaf node will
+still refer to the node in the original subgraph-a situation that is
+most likely incorrect (see <a href="#Figure_3">Figure
+3</a>).
+<p>To handle these ambiguities, a callback mechanism is provided.
+</p>
+<p><a name="Figure_3"></a><img style="width: 499px; height: 240px;"
+ alt="References to Other Scene Graph Nodes"
+ title="References to Other Nodes" src="SceneGraphSharing3.gif">
+</p>
+<ul>
+ <font size="-1"><b><i>Figure 3</i> &#8211; References to Other Scene Graph
+Nodes</b></font>
+</ul>
+<p>
+A leaf node that needs to update referenced nodes upon being duplicated
+by a call to <code>cloneTree</code> must implement the <code>updateNodeReferences</code>
+method. By using this method, the cloned leaf node can determine if any
+nodes referenced by it have been duplicated and, if so, update the
+appropriate references to their cloned counterparts.
+</p>
+<p>Suppose, for instance, that the leaf node Lf1 in <a href="#Figure_3">Figure
+3</a> implemented the <code>updateNodeReferences</code> method. Once
+all nodes had been duplicated, the <code>cloneTree</code> method
+would then call each cloned leaf node's <code>updateNodeReferences</code>
+method. When cloned leaf node Lf2's method was called, Lf2 could ask if
+the node N1 had been duplicated during the <code>cloneTree</code>
+operation. If the node had been duplicated, leaf Lf2 could then update
+its internal state with the cloned node, N2 (see <a href="#Figure_4">Figure
+4</a>).
+</p>
+<p><a name="Figure_4"></a><img style="width: 499px; height: 190px;"
+ alt="Updated Subgraph after updateNodeReferences Call"
+ title="Subgraph after updateNodeReferences"
+ src="SceneGraphSharing4.gif">
+</p>
+<p>
+</p>
+<ul>
+ <font size="-1"><b><i>Figure 4</i> &#8211; Updated Subgraph after
+updateNodeReferences Call</b></font>
+</ul>
+<p>
+All predefined Java&nbsp;3D nodes will automatically have their <code>updateNodeReferences</code>
+method defined. Only subclassed nodes that reference other nodes need
+to have this method overridden by the user.
+</p>
+<h3>Dangling References</h3>
+Because <code>cloneTree</code> is able to start
+the cloning operation from any node, there is a potential for creating
+dangling references. A dangling reference can occur only when a leaf
+node that contains a reference to another scene graph node is cloned.
+If the referenced node is not cloned, a dangling reference situation
+exists: There are now two leaf nodes that access the same node (<a
+ href="#Figure_5">Figure
+5</a>). A dangling reference is discovered when a leaf node's <code>updateNodeReferences</code>
+method calls the <code>getNewNodeReference</code> method and the
+cloned subgraph does not contain a counterpart to the node being looked
+up.
+<p><a name="Figure_5"></a><img style="width: 499px; height: 232px;"
+ alt="Dangling Reference" title="Dangling Reference"
+ src="SceneGraphSharing5.gif"></p>
+<p>
+</p>
+<ul>
+ <font size="-1"><b><i>Figure 5</i> &#8211; Dangling Reference: Bold Nodes
+Are Being Cloned</b></font>
+</ul>
+<p>
+When a dangling reference is discovered, <code>cloneTree</code> can
+handle it in one of two ways. If <code>cloneTree</code> is called
+without the <code>allowDanglingReferences</code> parameter set to <code>true</code>,
+a dangling reference will result in a <code>DanglingReferenceException</code>
+being thrown. The user can catch this exception if desired. If <code>cloneTree</code>
+is called with the <code>allowDanglingReferences</code> parameter set
+to <code>true</code>, the <code>updateNodeReferences</code> method
+will return a reference to the same object passed into the <code>getNewNodeReference</code>
+method. This will result in the <code>cloneTree</code> operation
+completing with dangling references, as in <a href="#Figure_5">Figure
+5</a>.
+</p>
+<h3>Subclassing Nodes</h3>
+All Java&nbsp;3D predefined nodes (for example, Interpolators and LOD
+nodes)
+automatically handle all node reference and duplication operations.
+When a user subclasses a Leaf object or a NodeComponent object, certain
+methods must be provided in order to ensure the proper operation of <code>cloneTree</code>.
+<p>Leaf node subclasses (for example, Behaviors) that contain any user
+node-specific data that needs to be duplicated during a <code>cloneTree</code>
+operation must define the following two methods:
+</p>
+<pre><b>Node cloneNode(boolean forceDuplicate);<br>void duplicateNode(Node n, boolean forceDuplicate)<br></b></pre>
+The <code>cloneNode</code> method consists of three lines:
+<pre><hr><br><code>UserSubClass usc = new UserSubClass();<br>usc.duplicateNode(this, forceDuplicate);</code><br>return usc;<br><br><hr></pre>
+The <code>duplicateNode</code> method must first call <code>super.duplicateNode</code>
+before duplicating any necessary user-specific data or setting any
+user-specific state.
+<p>NodeComponent subclasses that contain any user node-specific data
+must define the following two methods:
+</p>
+<pre><b>NodeComponent cloneNodeComponent();<br>void duplicateNodeComponent(NodeComponent nc, boolean forceDuplicate);<br></b></pre>
+The <code>cloneNodeComponent</code> method consists of three lines:
+<pre><hr><br><code>UserNodeComponent unc = new UserNodeComponent();<br>unc.duplicateNodeComponent(this, forceDuplicate);</code><br>return unc;<br><br><hr></pre>
+The <code>duplicateNodeComponent</code> method must first call <code>super.duplicateNodeComponent</code>
+and then can duplicate any user-specific data or set any user-specific
+state as necessary.
+<h3>NodeReferenceTable Object</h3>
+The NodeReferenceTable object is used by a leaf node's <code>updateNodeReferences</code>
+method called by the <code>cloneTree</code>
+operation. The NodeReferenceTable maps nodes from the original subgraph
+to the new nodes in the cloned subgraph. This information can then be
+used to update any cloned leaf node references to reference nodes in
+the cloned subgraph. This object can be created only by Java&nbsp;3D.
+<h3>Example: User Behavior Node</h3>
+The following is an example of a user-defined Behavior object to show
+properly how to define a node to be compatible with the <code>cloneTree</code>
+operation.
+<hr>
+<pre>class RotationBehavior extends Behavior {<br> TransformGroup objectTransform;<br> WakeupOnElapsedFrames w;<br></pre>
+<pre> Matrix4d rotMat = new Matrix4d();<br> Matrix4d objectMat = new Matrix4d();<br> Transform3D t = new Transform3D();<br></pre>
+<pre><i> // Override Behavior's initialize method to set up wakeup<br> // criteria<br></i></pre>
+<pre> public void initialize() {<br></pre>
+<pre><i> // Establish initial wakeup criteria<br></i></pre>
+<pre> wakeupOn(w);<br> }<br></pre>
+<pre><i> // Override Behavior's stimulus method to handle the event<br></i></pre>
+<pre> public void processStimulus(Enumeration criteria) {<br></pre>
+<pre><i> // Rotate by another PI/120.0 radians<br></i></pre>
+<pre> objectMat.mul(objectMat, rotMat);<br> t.set(objectMat);<br> objectTransform.setTransform(t);<br></pre>
+<pre><i> // Set wakeup criteria for next time<br></i></pre>
+<pre> wakeupOn(w);<br> }<br></pre>
+<pre><i> // Constructor for rotation behavior.<br></i></pre>
+<pre> public RotationBehavior(TransformGroup tg, int numFrames) {<br> w = new WakeupOnElapsedFrames(numFrames);<br> objectTransform = tg;<br></pre>
+<pre><i> objectMat.setIdentity();<br></i></pre>
+<pre><i> // Create a rotation matrix that rotates PI/120.0<br> // radians per frame<br> rotMat.rotX(Math.PI/120.0);<br></i></pre>
+<pre><i> // Note: When this object is duplicated via cloneTree,<br> // the cloned RotationBehavior node needs to point to<br> // the TransformGroup in the just-cloned tree. <br> }<br></i></pre>
+<pre><i> // Sets a new TransformGroup.<br></i></pre>
+<pre> public void setTransformGroup(TransformGroup tg) {<br> objectTransform = tg;<br></pre>
+<pre><i> }<br></i></pre>
+<pre><i> // The next two methods are needed for cloneTree to operate<br> // correctly.<br> // cloneNode is needed to provide a new instance of the user<br> // derived subclass.<br></i></pre>
+<pre> public Node cloneNode(boolean forceDuplicate) {<br></pre>
+<pre><i> // Get all data from current node needed for<br> // the constructor<br> int numFrames = w.getElapsedFrameCount();<br></i></pre>
+<pre> RotationBehavior r =<br> new RotationBehavior(objectTransform, numFrames);<br> r.duplicateNode(this, forceDuplicate);<br> return r;<br> }<br></pre>
+<pre><i> // duplicateNode is needed to duplicate all super class<br> // data as well as all user data.<br></i></pre>
+<pre> public void duplicateNode(Node originalNode, boolean <br> forceDuplicate) {<br> super.duplicateNode(originalNode, forceDuplicate);<br></pre>
+<pre><i> // Nothing to do here - all unique data was handled<br> // in the constructor in the cloneNode routine.<br> }<br></i></pre>
+<pre><i> // Callback for when this leaf is cloned. For this object<br> // we want to find the cloned TransformGroup node that this<br> // clone Leaf node should reference.<br></i></pre>
+<pre> public void updateNodeReferences(NodeReferenceTable t) {<br></pre>
+<pre><i> super.updateNodeReferences(t);<br></i></pre>
+<pre><i> // Update node's TransformGroup to proper reference<br></i></pre>
+<pre> TransformGroup newTg =<br> (TransformGroup)t.getNewObjectReference(<br> objectTransform);<br> setTransformGroup(newTg);<br> }<br>}<br></pre>
</body>
</html>
diff --git a/src/classes/share/javax/media/j3d/doc-files/SceneGraphSharing1.gif b/src/classes/share/javax/media/j3d/doc-files/SceneGraphSharing1.gif
new file mode 100644
index 0000000..f6ca47c
--- /dev/null
+++ b/src/classes/share/javax/media/j3d/doc-files/SceneGraphSharing1.gif
Binary files differ
diff --git a/src/classes/share/javax/media/j3d/doc-files/SceneGraphSharing2.gif b/src/classes/share/javax/media/j3d/doc-files/SceneGraphSharing2.gif
new file mode 100644
index 0000000..c062c81
--- /dev/null
+++ b/src/classes/share/javax/media/j3d/doc-files/SceneGraphSharing2.gif
Binary files differ
diff --git a/src/classes/share/javax/media/j3d/doc-files/SceneGraphSharing3.gif b/src/classes/share/javax/media/j3d/doc-files/SceneGraphSharing3.gif
new file mode 100644
index 0000000..325cab1
--- /dev/null
+++ b/src/classes/share/javax/media/j3d/doc-files/SceneGraphSharing3.gif
Binary files differ
diff --git a/src/classes/share/javax/media/j3d/doc-files/SceneGraphSharing4.gif b/src/classes/share/javax/media/j3d/doc-files/SceneGraphSharing4.gif
new file mode 100644
index 0000000..78aeaab
--- /dev/null
+++ b/src/classes/share/javax/media/j3d/doc-files/SceneGraphSharing4.gif
Binary files differ
diff --git a/src/classes/share/javax/media/j3d/doc-files/SceneGraphSharing5.gif b/src/classes/share/javax/media/j3d/doc-files/SceneGraphSharing5.gif
new file mode 100644
index 0000000..2ff6547
--- /dev/null
+++ b/src/classes/share/javax/media/j3d/doc-files/SceneGraphSharing5.gif
Binary files differ
diff --git a/src/classes/share/javax/media/j3d/doc-files/ViewModel.html b/src/classes/share/javax/media/j3d/doc-files/ViewModel.html
index 991ebfc..3cc9ece 100644
--- a/src/classes/share/javax/media/j3d/doc-files/ViewModel.html
+++ b/src/classes/share/javax/media/j3d/doc-files/ViewModel.html
@@ -7,7 +7,1058 @@
</head>
<body>
<h2>View Model</h2>
-<p><br>
+<p>Java&nbsp;3D introduces a new view model that takes Java's
+vision of "write once, run anywhere" and generalizes it to include
+display devices and six-degrees-of-freedom input peripherals such as
+head trackers. This "write once, view everywhere" nature of the new
+view model means that an application or applet written using the Java
+3D view model can render images to a broad range of display devices,
+including standard computer displays, multiple-projection display
+rooms, and head-mounted displays, without modification of the scene
+graph. It also means that the same application, once again without
+modification, can render stereoscopic views and can take advantage of
+the input from a head tracker to control the rendered view.
</p>
+<p>Java&nbsp;3D's view model achieves this versatility by cleanly
+separating
+the virtual and the physical world. This model distinguishes between
+how an application positions, orients, and scales a ViewPlatform object
+(a viewpoint) within the virtual world and how the Java&nbsp;3D
+renderer
+constructs the final view from that viewpoint's position and
+orientation. The application controls the ViewPlatform's position and
+orientation; the renderer computes what view to render using this
+position and orientation, a description of the end-user's physical
+environment, and the user's position and orientation within the
+physical environment.
+</p>
+<p>This document first explains why Java&nbsp;3D chose a different view
+model
+and some of the philosophy behind that choice. It next describes how
+that model operates in the simple case of a standard computer screen
+without head tracking&#8212;the most common case. Finally, it presents
+advanced material that was originally published in Appendix C of the
+API specification guide.
+</p>
+<p>
+</p>
+<h2>Why a New Model?</h2>
+<p>Camera-based view models, as found in low-level APIs, give
+developers
+control over all rendering parameters. This makes sense when dealing
+with custom applications, less sense when dealing with systems that
+wish to have broader applicability: systems such as viewers or browsers
+that load and display whole worlds as a single unit or systems where
+the end users view, navigate, display, and even interact with the
+virtual world.
+</p>
+<p>Camera-based view models emulate a camera in the virtual world, not
+a
+human in a virtual world. Developers must continuously reposition a
+camera to emulate "a human in the virtual world."
+</p>
+<p>The Java&nbsp;3D view model incorporates head tracking directly, if
+present,
+with no additional effort from the developer, thus providing end users
+with the illusion that they actually exist inside a virtual world.
+</p>
+<p>The Java&nbsp;3D view model, when operating in a non-head-tracked
+environment and rendering to a single, standard display, acts very much
+like a traditional camera-based view model, with the added
+functionality of being able to generate stereo views transparently.
+</p>
+<p>
+</p>
+<h3>The Physical Environment
+Influences the View</h3>
+<p>Letting the application control all viewing parameters is not
+reasonable in systems in which the physical environment dictates some
+of the view parameters.
+</p>
+<p>One example of this is a head-mounted display (HMD), where the
+optics
+of the head-mounted display directly determine the field of view that
+the application should use. Different HMDs have different optics,
+making it unreasonable for application developers to hard-wire such
+parameters or to allow end users to vary that parameter at will.
+</p>
+<p>Another example is a system that automatically computes view
+parameters
+as a function of the user's current head position. The specification of
+a world and a predefined flight path through that world may not exactly
+specify an end-user's view. HMD users would expect to look and thus see
+to their left or right even when following a fixed path through the
+environment&#8212;imagine an amusement park ride with vehicles that follow
+fixed paths to present content to their visitors, but visitors can
+continue to move their heads while on those rides.
+</p>
+<p>Depending on the physical details of the end-user's environment, the
+values of the viewing parameters, particularly the viewing and
+projection matrices, will vary widely. The factors that influence the
+viewing and projection matrices include the size of the physical
+display, how the display is mounted (on the user's head or on a table),
+whether the computer knows the user's head location in three space, the
+head mount's actual field of view, the display's pixels per inch, and
+other such parameters. For more information, see "<a
+ href="#View_Model_Details">View Model Details</a>."
+</p>
+<p>
+</p>
+<h2>Separation of Physical and
+Virtual</h2>
+<p>The Java&nbsp;3D view model separates the virtual environment, where
+the
+application programmer has placed objects in relation to one another,
+from the physical environment, where the user exists, sees computer
+displays, and manipulates input devices.
+</p>
+<p>Java&nbsp;3D also defines a fundamental correspondence between the
+user's
+physical world and the virtual world of the graphic application. This
+physical-to-virtual-world correspondence defines a single common space,
+a space where an action taken by an end user affects objects within the
+virtual world and where any activity by objects in the virtual world
+affects the end user's view.
+</p>
+<p>
+</p>
+<h3>The Virtual World</h3>
+<p>The virtual world is a common space in which virtual objects exist.
+The
+virtual world coordinate system exists relative to a high-resolution
+Locale&#8212;each Locale object defines the origin of virtual world
+coordinates for all of the objects attached to that Locale. The Locale
+that contains the currently active ViewPlatform object defines the
+virtual world coordinates that are used for rendering. Java&nbsp;3D
+eventually transforms all coordinates associated with scene graph
+elements into this common virtual world space.
+</p>
+<h3>The Physical World</h3>
+<p>The physical world is just that&#8212;the real, physical world. This is
+the
+space in which the physical user exists and within which he or she
+moves his or her head and hands. This is the space in which any
+physical trackers define their local coordinates and in which several
+calibration coordinate systems are described.
+</p>
+<p>The physical world is a space, not a common coordinate system
+between
+different execution instances of Java&nbsp;3D. So while two different
+computers at two different physical locations on the globe may be
+running at the same time, there is no mechanism directly within
+Java&nbsp;3D
+to relate their local physical world coordinate systems with each
+other. Because of calibration issues, the local tracker (if any)
+defines the local physical world coordinate system known to a
+particular instance of Java&nbsp;3D.
+</p>
+<p>
+</p>
+<h2>The Objects That Define the
+View</h2>
+<p>Java&nbsp;3D distributes its view model parameters across several
+objects,
+specifically, the View object and its associated component objects, the
+PhysicalBody object, the PhysicalEnvironment object, the Canvas3D
+object, and the Screen3D object. <a href="#Figure_1">Figure
+1</a> shows graphically the central role of the View object and the
+subsidiary role of its component objects.
+</p>
+<p><a name="Figure_1"></a><img style="width: 500px; height: 355px;"
+ alt="View Object + Other Components"
+ title="View Object + Other Components" src="ViewModel1.gif"></p>
+<p>
+</p>
+<ul>
+ <font size="-1"><b><i>Figure 1</i> &#8211; View Object, Its Component
+Objects, and Their
+Interconnection</b></font>
+</ul>
+<p>
+The view-related objects shown in <a href="#Figure_1">Figure
+1</a>
+and their roles are as follows. For each of these objects, the portion
+of the API that relates to modifying the virtual world and the portion
+of the API that is relevant to non-head-tracked standard display
+configurations are derived in this chapter. The remainder of the
+details are described in "<a href="#View_Model_Details">View Model
+Details</a>."
+</p>
+<ul>
+ <li><a href="../ViewPlatform.html"><em>ViewPlatform</em></a>: A leaf
+node that locates a view within a
+scene graph. The ViewPlatform's parents specify its location,
+orientation, and scale within the virtual universe. See "<a
+ href="#ViewPlatform_Place">ViewPlatform: A Place in the Virtual World</a>,"
+for more
+information. </li>
+</ul>
+<ul>
+ <li><a href="../View.html"><em>View</em></a>: The main view object.
+It contains many pieces of
+view state.</li>
+</ul>
+<ul>
+ <li><a href="../Canvas3D.html"><em>Canvas3D</em></a>: The 3D version
+of the Abstract Windowing
+Toolkit
+(AWT) Canvas object. It represents a window in which Java&nbsp;3D will
+draw
+images. It contains a reference to a Screen3D object and information
+describing the Canvas3D's size, shape, and location within the Screen3D
+object.</li>
+</ul>
+<ul>
+ <li><a href="../Screen3D.html"><em>Screen3D</em></a>: An object that
+contains information describing
+the display screen's physical properties. Java&nbsp;3D places
+display-screen
+information in a separate object to prevent the duplication of screen
+information within every Canvas3D object that shares a common screen.</li>
+</ul>
+<ul>
+ <li><a href="../PhysicalBody.html">PhysicalBody</a>: An object that
+contains calibration information
+describing the user's physical body.</li>
+</ul>
+<ul>
+ <li><a href="../PhysicalEnvironment.html">PhysicalEnvironment</a>: An
+object that contains calibration
+information describing the physical world, mainly information that
+describes the environment's six-degrees-of-freedom tracking hardware,
+if present.</li>
+</ul>
+<p>Together, these objects describe the geometry of viewing rather than
+explicitly providing a viewing or projection matrix. The Java&nbsp;3D
+renderer uses this information to construct the appropriate viewing and
+projection matrices. The geometric focus of these view objects provides
+more flexibility in generating views&#8212;a flexibility needed to support
+alternative display configurations.
+</p>
+<h2><a name="ViewPlatform_Place"></a>ViewPlatform: A Place in the
+Virtual World</h2>
+<p>A ViewPlatform leaf node defines a coordinate system, and thus a
+reference frame with its associated origin or reference point, within
+the virtual world. The ViewPlatform serves as a point of attachment for
+View objects and as a base for determining a renderer's view.
+</p>
+<p><a href="#Figure_2">Figure
+2</a>
+shows a portion of a scene graph containing a ViewPlatform node. The
+nodes directly above a ViewPlatform determine where that ViewPlatform
+is located and how it is oriented within the virtual world. By
+modifying the Transform3D object associated with a TransformGroup node
+anywhere directly above a ViewPlatform, an application or behavior can
+move that ViewPlatform anywhere within the virtual world. A simple
+application might define one TransformGroup node directly above a
+ViewPlatform, as shown in <a href="#Figure_2">Figure
+2</a>.
+</p>
+<p>A VirtualUniverse may have many different ViewPlatforms, but a
+particular View object can attach itself only to a single ViewPlatform.
+Thus, each rendering onto a Canvas3D is done from the point of view of
+a single ViewPlatform.
+</p>
+<p><a name="Figure_2"></a><img style="width: 500px; height: 359px;"
+ alt="View Platform Branch Graph" title="View Platform Branch Graph"
+ src="ViewModel2.gif">
+</p>
+<p>
+</p>
+<ul>
+ <font size="-1"><b><i>Figure 2</i> &#8211; A Portion of a Scene Graph
+Containing a ViewPlatform Object</b></font>
+</ul>
+<p>
+</p>
+<h3>Moving through the Virtual
+World</h3>
+<p>An application navigates within the virtual world by modifying a
+ViewPlatform's parent TransformGroup. Examples of applications that
+modify a ViewPlatform's location and orientation include browsers,
+object viewers that provide navigational controls, applications that do
+architectural walkthroughs, and even search-and-destroy games.
+</p>
+<p>Controlling the ViewPlatform object can produce very interesting and
+useful results. Our first simple scene graph (see <a
+ href="intro.html#Figure_1">"Introduction," Figure 1</a>)
+defines a scene graph for a simple application that draws an object in
+the center of a window and rotates that object about its center point.
+In that figure, the Behavior object modifies the TransformGroup
+directly above the Shape3D node.
+</p>
+<p>An alternative application scene graph, shown in <a href="#Figure_3">Figure
+3</a>,
+leaves the central object alone and moves the ViewPlatform around the
+world. If the shape node contains a model of the earth, this
+application could generate a view similar to that seen by astronauts as
+they orbit the earth.
+</p>
+<p>Had we populated this world with more objects, this scene graph
+would allow navigation through the world via the Behavior node.
+</p>
+<p><a name="Figure_3"></a><img style="width: 500px; height: 289px;"
+ alt="Simple Scene Graph with View Control"
+ title="Simple Scene Graph with View Control" src="ViewModel3.gif">
+</p>
+<p>
+</p>
+<ul>
+ <font size="-1"><b><i>Figure 3</i> &#8211; A Simple Scene Graph with View
+Control</b></font>
+</ul>
+<p>
+Applications and behaviors manipulate a <a
+ href="../TransformGroup.html">TransformGroup</a> through its
+access methods. These methods allow an application to retrieve and
+set the Group node's Transform3D object. Transform3D Node methods
+include <code>getTransform</code> and <code>setTransform</code>.
+</p>
+<p>
+</p>
+<h3>Dropping in on a Favorite
+Place</h3>
+<p>A scene graph may contain multiple <a href="../ViewPlatform.html">ViewPlatform</a>
+objects. If a user detaches a <a href="../View.html">View</a> object
+from a ViewPlatform and then
+reattaches that View to a different ViewPlatform, the image on the
+display will now be rendered from the point of view of the new
+ViewPlatform.</p>
+<h3>Associating Geometry with a
+ViewPlatform</h3>
+<p>Java&nbsp;3D does not have any built-in semantics for displaying a
+visible
+manifestation of a ViewPlatform within the virtual world (an <em>avatar</em>).
+However, a developer can construct and manipulate an avatar using
+standard Java&nbsp;3D constructs.
+</p>
+<p>A developer can construct a small scene graph consisting of a
+TransformGroup node, a behavior leaf node, and a shape node and insert
+it directly under the BranchGroup node associated with the ViewPlatform
+object. The shape node would contain a geometric model of the avatar's
+head. The behavior node would change the TransformGroup's transform
+periodically to the value stored in a View object's <code>UserHeadToVworld</code>
+parameter (see "<a href="#View_Model_Details">View Model
+Details</a>").
+The avatar's virtual head, represented by the shape node, will now move
+around in lock-step with the ViewPlatform's TransformGroup<em> and </em>any
+relative position and orientation changes of the user's actual physical
+head (if a system has a head tracker).
+</p>
+<p>
+</p>
+<h2><a name="Generating_View"></a>Generating a View</h2>
+<p>Java&nbsp;3D generates viewing matrices in one of a few different
+ways,
+depending on whether the end user has a head-mounted or a room-mounted
+display environment and whether head tracking is enabled. This section
+describes the computation for a non-head-tracked, room-mounted
+display&#8212;a standard computer display. Other environments are described
+in "<a href="#View_Model_Details">View Model Details</a>."
+</p>
+<p>In the absence of head tracking, the ViewPlatform's origin specifies
+the virtual eye's location and orientation within the virtual world.
+However, the eye location provides only part of the information needed
+to render an image. The renderer also needs a projection matrix. In the
+default mode, Java&nbsp;3D uses the projection policy, the specified
+field-of-view information, and the front and back clipping distances to
+construct a viewing frustum.
+</p>
+<p>
+</p>
+<h3>Composing Model and Viewing
+Transformations</h3>
+<p><a href="#Figure_4">Figure
+4</a>
+shows a simple scene graph. To draw the object labeled "S,"
+Java&nbsp;3D
+internally constructs the appropriate model, view platform, eye, and
+projection matrices. Conceptually, the model transformation for a
+particular object is computed by concatenating all the matrices in a
+direct path between the object and the VirtualUniverse. The view matrix
+is then computed&#8212;again, conceptually&#8212;by concatenating all the matrices
+between the VirtualUniverse object and the ViewPlatform attached to the
+current View object. The eye and projection matrices are constructed
+from the View object and its associated component objects.
+</p>
+<p><a name="Figure_4"></a><img style="width: 500px; height: 332px;"
+ alt="Object and ViewPlatform Transform"
+ title="Object and ViewPlatform Transform" src="ViewModel4.gif"></p>
+<p>
+</p>
+<ul>
+ <font size="-1"><b><i>Figure 4</i> &#8211; Object and ViewPlatform
+Transformations</b></font>
+</ul>
+<p>In our scene graph, what we would normally consider the
+model transformation would consist of the following three
+transformations: <strong>LT</strong>1<strong>T</strong>2. By
+multiplying <strong>LT</strong>1<strong>T</strong>2
+by a vertex in the shape object, we would transform that vertex into
+the virtual universe's coordinate system. What we would normally
+consider the view platform transformation would be (<strong>LT</strong>v1)<sup>-1</sup>
+or <strong>T</strong>v1<sup>-1</sup><strong>L</strong><sup>-1</sup>.
+This presents a problem since coordinates in the virtual universe are
+256-bit fixed-point values, which cannot be used to represent
+transformed points efficiently.
+</p>
+<p>Fortunately, however, there is a solution to this problem. Composing
+the model and view platform transformations gives us
+</p>
+<dl>
+ <dt><br>
+ </dt>
+  <dd> <strong>T</strong>v1<sup>-1</sup><strong>L</strong><sup>-1</sup><strong>LT</strong>1<strong>T</strong>2
+= <strong>T</strong>v1<sup>-1</sup><strong>IT</strong>1<strong>T</strong>2
+= <strong>T</strong>v1<sup>-1</sup><strong>T</strong>1<strong>T</strong>2,
+ </dd>
+</dl>
+<p>the matrix that takes vertices in an object's local coordinate
+system
+and places them in the ViewPlatform's coordinate system. Note that the
+high-resolution Locale transformations cancel each other out, which
+removes the need to actually transform points into high-resolution
+VirtualUniverse coordinates. The general formula of the matrix that
+transforms object coordinates to ViewPlatform coordinates is <strong>T</strong>vn<sup>-1</sup>...<strong>T</strong>v2<sup>-1</sup><strong>T</strong>v1<sup>-1</sup><strong>T</strong>1<strong>T</strong>2...<strong>T</strong>m.
+</p>
+<p>As mentioned earlier, the View object contains the remainder of the
+view information, specifically, the eye matrix, <strong>E</strong>,
+that takes points in the ViewPlatform's local coordinate system and
+translates them into the user's eye coordinate system, and the
+projection matrix, <strong>P</strong>, that projects objects in the
+eye's coordinate system into clipping coordinates. The final
+concatenation of matrices for rendering our shape object "S" on the
+specified Canvas3D is <strong>PET</strong>v1<sup>-1</sup><strong>T</strong>1<strong>T</strong>2.
+In general this is <strong>PET</strong>vn<sup>-1</sup>...<strong>T</strong>v2<sup>-1</sup><strong>T</strong>v1<sup>-1</sup><strong>T</strong>1<strong>T</strong>2...<strong>T</strong>m.
+</p>
+<p>The details of how Java&nbsp;3D constructs the matrices <strong>E</strong>
+and <strong>P</strong> in different end-user configurations are
+described in "<a href="#View_Model_Details">View Model Details</a>."
+</p>
+<p>
+</p>
+<h3>Multiple Locales</h3>
+<p>Java&nbsp;3D supports multiple high-resolution Locales. In some
+cases,
+these
+Locales are close enough to each other that they can "see" each other,
+meaning that objects can be rendered even though they are not in the
+same Locale as the ViewPlatform object that is attached to the View.
+Java&nbsp;3D automatically handles this case without the application
+having
+to do anything. As in the previous example, where the ViewPlatform and
+the object being rendered are attached to the same Locale, Java&nbsp;3D
+internally constructs the appropriate matrices for cases in which the
+ViewPlatform and the object being rendered are <em>not</em> attached
+to the same Locale.
+</p>
+<p>Let's take two Locales, L1 and L2, with the View attached to a
+ViewPlatform in L1. According to our general formula, the modeling
+transformation&#8212;the transformation that takes points in object
+coordinates and transforms them into VirtualUniverse coordinates&#8212;is <strong>LT</strong>1<strong>T</strong>2...<strong>T</strong>m.
+In our specific example, a point in Locale L2 would be transformed into
+VirtualUniverse coordinates by <strong>L</strong>2<strong>T</strong>1<strong>T</strong>2...<strong>T</strong>m.
+The view platform transformation would be (<strong>L</strong>1<strong>T</strong>v1<strong>T</strong>v2...<strong>T</strong>vn)<sup>-1</sup>
+or <strong>T</strong>vn<sup>-1</sup>...<strong>T</strong>v2<sup>-1</sup><strong>T</strong>v1<sup>-1</sup><strong>L</strong>1<sup>-1</sup>.
+Composing these two matrices gives us
+</p>
+<dl>
+ <dt><br>
+ </dt>
+ <dd> <strong>T</strong>vn<sup>-1</sup>...<strong>T</strong>v2<sup>-1</sup><strong>T</strong>v1<sup>-1</sup><strong>L</strong>1<sup>-1</sup><strong>L</strong>2<strong>T</strong>1<strong>T</strong>2...<strong>T</strong>m.
+ </dd>
+</dl>
+<p>Thus, to render objects in another Locale, it is sufficient to
+compute <strong>L</strong>1<sup>-1</sup><strong>L</strong>2
+and use that as the starting matrix when composing the model
+transformations. Given that a Locale is represented by a single
+high-resolution coordinate position, the transformation <strong>L</strong>1<sup>-1</sup><strong>L</strong>2
+is a simple translation by <strong>L</strong>2 - <strong>L</strong>1.
+Again, it is not actually necessary to transform points into
+high-resolution VirtualUniverse coordinates.
+</p>
+<p>In general, Locales that are close enough that the difference in
+their
+high-resolution coordinates can be represented in double precision by a
+noninfinite value are close enough to be rendered. In practice, more
+sophisticated culling techniques can be used to render only those
+Locales that really are "close enough."
+</p>
+<p>
+</p>
+<h2>A Minimal Environment</h2>
+<p>An application must create a minimal set of Java&nbsp;3D objects
+before
+Java
+3D can render to a display device. In addition to a Canvas3D object,
+the application must create a View object, with its associated
+PhysicalBody and PhysicalEnvironment objects, and the following scene
+graph elements:
+</p>
+<ul>
+ <li>A VirtualUniverse object</li>
+</ul>
+<ul>
+ <li>A high-resolution Locale object</li>
+</ul>
+<ul>
+ <li>A BranchGroup node object</li>
+</ul>
+<ul>
+ <li>A TransformGroup node object with associated transform</li>
+</ul>
+<ul>
+ <li>A ViewPlatform leaf node object that defines the position and
+orientation within the virtual universe for generating views</li>
+</ul>
+<hr>
+<h2><a name="View_Model_Details"></a>View Model Details</h2>
+<p>An application programmer writing a 3D
+graphics program that will deploy on a variety of platforms must
+anticipate the likely end-user environments and must carefully
+construct the view transformations to match those characteristics using
+a low-level API. This appendix addresses many of the issues an
+application must face and describes the sophisticated features that
+Java&nbsp;3D's advanced view model provides.
+</p>
+<p>
+</p>
+<h2>An Overview of the
+Java&nbsp;3D
+View Model</h2>
+Both camera-based and Java&nbsp;3D-based view models allow a programmer
+to
+specify the shape of a view frustum and, under program control, to
+place, move, and reorient that frustum within the virtual environment.
+However, how they do this varies enormously. Unlike the camera-based
+system, the Java&nbsp;3D view model allows slaving the view frustum's
+position and orientation to that of a six-degrees-of-freedom tracking
+device. By slaving the frustum to the tracker, Java&nbsp;3D can
+automatically modify the view frustum so that the generated images
+match the end-user's viewpoint exactly.
+<p>Java&nbsp;3D must handle two rather different head-tracking
+situations.
+In one case, we rigidly attach a tracker's <em>base</em>,
+and thus its coordinate frame, to the display environment. This
+corresponds to placing a tracker base in a fixed position and
+orientation relative to a projection screen within a room, to a
+computer display on a desk, or to the walls of a multiple-wall
+projection display. In the second head-tracking situation, we rigidly
+attach a tracker's <em>sensor</em>, not its base, to the display
+device. This corresponds to rigidly attaching one of that tracker's
+sensors to a head-mounted display and placing the tracker base
+somewhere within the physical environment.
+</p>
+<p>
+</p>
+<h2>Physical Environments and
+Their Effects</h2>
+Imagine an application where the end user sits on a magic carpet. The
+application flies the user through the virtual environment by
+controlling the carpet's location and orientation within the virtual
+world. At first glance, it might seem that the application also
+controls what the end user will see&#8212;and it does, but only
+superficially.
+<p>The following two examples show how end-user environments can
+significantly affect how an application must construct viewing
+transformations.
+</p>
+<p>
+</p>
+<h3>A Head-Mounted Example</h3>
+Imagine that the end user sees the magic carpet and the virtual world
+with a head-mounted display and head tracker. As the application flies
+the carpet through the virtual world, the user may turn to look to the
+left, to the right, or even toward the rear of the carpet. Because the
+head tracker keeps the renderer informed of the user's gaze direction,
+it might not need to draw the scene directly in front of the magic
+carpet. The view that the renderer draws on the head-mount's display
+must match what the end user would see if the experience had occurred
+in the real world.
+<h3>A Room-Mounted Example</h3>
+Imagine a slightly different scenario where the end user sits in a
+darkened room in front of a large projection screen. The application
+still controls the carpet's flight path; however, the position and
+orientation of the user's head barely influences the image drawn on the
+projection screen. If a user looks left or right, then he or she sees
+only the darkened room. The screen does not move. It's as if the screen
+represents the magic carpet's "front window" and the darkened room
+represents the "dark interior" of the carpet.
+<p>By adding a left and right screen, we give the magic carpet rider a
+more complete view of the virtual world surrounding the carpet. Now our
+end user sees the view to the left or right of the magic carpet by
+turning left or right.
+</p>
+<p>
+</p>
+<h3>Impact of Head Position and
+Orientation on the Camera</h3>
+In the head-mounted example, the user's head position and orientation
+significantly affects a camera model's camera position and orientation
+but hardly has any effect on the projection matrix. In the room-mounted
+example, the user's head position and orientation contributes little to
+a camera model's camera position and orientation; however, it does
+affect the projection matrix.
+<p>From a camera-based perspective, the application developer must
+construct the camera's position and orientation by combining the
+virtual-world component (the position and orientation of the magic
+carpet) and the physical-world component (the user's instantaneous head
+position and orientation).
+</p>
+<p>Java&nbsp;3D's view model incorporates the appropriate abstractions
+to
+compensate automatically for such variability in end-user hardware
+environments.
+</p>
+<p>
+</p>
+<h2>The Coordinate Systems</h2>
+The basic view model consists of eight or nine coordinate systems,
+depending on whether the end-user environment consists of a
+room-mounted display or a head-mounted display. First, we define the
+coordinate systems used in a room-mounted display environment. Next, we
+define the added coordinate system introduced when using a head-mounted
+display system.
+<h3>Room-Mounted Coordinate
+Systems</h3>
+The room-mounted coordinate system is divided into the virtual
+coordinate system and the physical coordinate system. <a
+ href="#Figure_5">Figure
+5</a>
+shows these coordinate systems graphically. The coordinate systems
+within the grayed area exist in the virtual world; those outside exist
+in the physical world. Note that the coexistence coordinate system
+exists in both worlds.
+<h4>The Virtual Coordinate
+Systems</h4>
+<h5> The Virtual World Coordinate System</h5>
+The virtual world coordinate system encapsulates
+the unified coordinate system for all scene graph objects in the
+virtual environment. For a given View, the virtual world coordinate
+system is defined by the Locale object that contains the ViewPlatform
+object attached to the View. It is a right-handed coordinate system
+with +<em>x</em> to the right, +<em>y</em> up, and +<em>z</em> toward
+the viewer.
+<h5> The ViewPlatform Coordinate System</h5>
+The ViewPlatform coordinate system is the local coordinate system of
+the ViewPlatform leaf node to which the View is attached.
+<p><a name="Figure_5"></a><img style="width: 500px; height: 181px;"
+ alt="Display Rigidly Attached to Tracker Base"
+ title="Display Rigidly Attached to Tracker Base" src="ViewModel5.gif"></p>
+<p>
+</p>
+<ul>
+ <font size="-1"><b><i>Figure 5</i> &#8211; Display Rigidly Attached to the
+Tracker Base</b></font>
+</ul>
+<p>
+</p>
+<h5> The Coexistence Coordinate System</h5>
+A primary implicit goal of any view model is to map a specified local
+portion of the physical world onto a specified portion of the virtual
+world. Once established, one can legitimately ask where the user's head
+or hand is located within the virtual world or where a virtual object
+is located in the local physical world. In this way the physical user
+can interact with objects inhabiting the virtual world, and vice versa.
+To establish this mapping, Java&nbsp;3D defines a special coordinate
+system,
+called coexistence coordinates, that is defined to exist in both the
+physical world and the virtual world.
+<p>The coexistence coordinate system exists half in the virtual world
+and
+half in the physical world. The two transforms that go from the
+coexistence coordinate system to the virtual world coordinate system
+and back again contain all the information needed to expand or shrink
+the virtual world relative to the physical world. It also contains the
+information needed to position and orient the virtual world relative to
+the physical world.
+</p>
+<p>Modifying the transform that maps the coexistence coordinate system
+into the virtual world coordinate system changes what the end user can
+see. The Java&nbsp;3D application programmer moves the end user within
+the
+virtual world by modifying this transform.
+</p>
+<p>
+</p>
+<h4>The Physical Coordinate
+Systems</h4>
+<h5> The Head Coordinate System</h5>
+The head coordinate system allows an application to import its user's
+head geometry. The coordinate system provides a simple consistent
+coordinate frame for specifying such factors as the location of the
+eyes and ears.
+<h5> The Image Plate Coordinate System</h5>
+The image plate coordinate system corresponds with the physical
+coordinate system of the image generator. The image plate is defined as
+having its origin at the lower left-hand corner of the display area and
+as lying in the display area's <em>XY</em>
+plane. Note that image plate is a different coordinate system than
+either left image plate or right image plate. These last two coordinate
+systems are defined in head-mounted environments only.
+<h5> The Head Tracker Coordinate System</h5>
+The head tracker coordinate system corresponds to the
+six-degrees-of-freedom tracker's sensor attached to the user's head.
+The head tracker's coordinate system describes the user's instantaneous
+head position.
+<h5> The Tracker Base Coordinate System</h5>
+The tracker base coordinate system corresponds to the emitter
+associated with absolute position/orientation trackers. For those
+trackers that generate relative position/orientation information, this
+coordinate system is that tracker's initial position and orientation.
+In general, this coordinate system is rigidly attached to the physical
+world.
+<h3>Head-Mounted Coordinate
+Systems</h3>
+Head-mounted coordinate systems divide into the same virtual coordinate
+systems and physical coordinate systems. <a href="#Figure_6">Figure
+6</a>
+shows these coordinate systems graphically. As with the room-mounted
+coordinate systems, the coordinate systems within the grayed area exist
+in the virtual world; those outside exist in the physical world. Once
+again, the coexistence coordinate system exists in both worlds. The
+arrangement of the coordinate system differs from those for a
+room-mounted display environment. The head-mounted version of
+Java&nbsp;3D's
+coordinate system differs in another way. It includes two image plate
+coordinate systems, one for each of an end-user's eyes.
+<h5> The Left Image Plate and Right Image Plate Coordinate Systems</h5>
+The left image plate and right image plate
+coordinate systems correspond with the physical coordinate system of
+the image generator associated with the left and right eye,
+respectively. The image plate is defined as having its origin at the
+lower left-hand corner of the display area and lying in the display
+area's <em>XY</em> plane. Note that the left image plate's <em>XY</em>
+plane does not necessarily lie parallel to the right image plate's <em>XY</em>
+plane. Note that the left image plate and the right image plate are
+different coordinate systems than the room-mounted display
+environment's image plate coordinate system.
+<p><a name="Figure_6"></a><img style="width: 499px; height: 162px;"
+ alt="Display Rigidly Attached to Head Tracker"
+ title="Display Rigidly Attached to Head Tracker" src="ViewModel6.gif"></p>
+<p>
+</p>
+<ul>
+ <font size="-1"><b><i>Figure 6</i> &#8211; Display Rigidly Attached to the
+Head Tracker (Sensor)</b></font>
+</ul>
+<p>
+</p>
+<h2>The Screen3D Object</h2>
+A Screen3D object represents one independent display device. The most
+common environment for a Java&nbsp;3D application is a desktop computer
+with
+or without a head tracker. <a href="#Figure_7">Figure
+7</a> shows a scene graph fragment for a display environment designed
+for such an end-user environment. <a href="#Figure_8">Figure
+8</a> shows a display environment that matches the scene graph
+fragment in <a href="#Figure_7">Figure
+7</a>.
+<p><a name="Figure_7"></a><img style="width: 499px; height: 185px;"
+ alt="Environment with Single Screen3D Object"
+ title="Environment with Single Screen3D Object" src="ViewModel7.gif"></p>
+<p>
+</p>
+<ul>
+ <font size="-1"><b><i>Figure 7</i> &#8211; A Portion of a Scene Graph
+Containing a Single Screen3D
+Object</b></font>
+</ul>
+<p>
+<a name="Figure_8"></a><img style="width: 500px; height: 237px;"
+ alt="Single-Screen Display Environment"
+ title="Single-Screen Display Environment" src="ViewModel8.gif"></p>
+<p>
+</p>
+<ul>
+ <font size="-1"><b><i>Figure 8</i> &#8211; A Single-Screen Display
+Environment</b></font>
+</ul>
+<p>
+A multiple-projection wall display presents a more exotic environment.
+Such environments have multiple screens, typically three or more. <a
+ href="#Figure_9">Figure
+9</a> shows a scene graph fragment representing such a system, and <a
+ href="#Figure_10">Figure
+10</a> shows the corresponding display environment.
+</p>
+<p><a name="Figure_9"></a><img style="width: 500px; height: 196px;"
+ alt="Environment with Three Screen3D Objects"
+ title="Environment with Three Screen3D Objects" src="ViewModel9.gif">
+</p>
+<p>
+</p>
+<ul>
+ <font size="-1"><b><i>Figure 9</i> &#8211; A Portion of a Scene Graph
+Containing Three Screen3D
+Objects</b></font>
+</ul>
+<p>
+<a name="Figure_10"></a><img style="width: 700px; height: 241px;"
+ alt="Three-Screen Display Environment"
+ title="Three-Screen Display Environment" src="ViewModel10.gif"></p>
+<p>
+</p>
+<ul>
+ <font size="-1"><b><i>Figure 10</i> &#8211; A Three-Screen Display
+Environment</b></font>
+</ul>
+<p>
+A multiple-screen environment requires more care during the
+initialization and calibration phase. Java&nbsp;3D must know how the
+Screen3Ds are placed with respect to one another, the tracking device,
+and the physical portion of the coexistence coordinate system.
+</p>
+<p>
+</p>
+<h2>Viewing in Head-Tracked Environments</h2>
+<p>The "<a href="#Generating_View">Generating a View</a>" section
+describes how Java&nbsp;3D generates a view for a standard flat-screen
+display with no head tracking. In this section, we describe how
+Java&nbsp;3D
+generates a view in a room-mounted, head-tracked display
+environment&#8212;either a computer monitor with shutter glasses and head
+tracking or a multiple-wall display with head-tracked shutter glasses.
+Finally, we describe how Java&nbsp;3D generates view matrices in a
+head-mounted and head-tracked display environment.
+</p>
+<h3>A Room-Mounted Display with
+Head Tracking</h3>
+When head tracking combines with a room-mounted
+display environment (for example, a standard flat-screen display), the
+ViewPlatform's origin and orientation serve as a base for constructing
+the view matrices. Additionally, Java&nbsp;3D uses the end-user's head
+position and orientation to compute where an end-user's eyes are
+located in physical space. Each eye's position serves to offset the
+corresponding virtual eye's position relative to the ViewPlatform's
+origin. Each eye's position also serves to specify that eye's frustum
+since the eye's position relative to a Screen3D uniquely specifies that
+eye's view frustum. Note that Java&nbsp;3D will access the PhysicalBody
+object to obtain information describing the user's interpupillary
+distance and tracking hardware, values it needs to compute the
+end-user's eye positions from the head position information.
+<h3>A Head-Mounted Display with
+Head Tracking</h3>
+In a head-mounted environment, the ViewPlatform's origin and
+orientation also serves as a base for constructing view matrices. And,
+as in the head-tracked, room-mounted environment, Java&nbsp;3D also
+uses the
+end-user's head position and orientation to modify the ViewPlatform's
+position and orientation further. In a head-tracked, head-mounted
+display environment, an end-user's eyes do not move relative to their
+respective display screens, rather, the display screens move relative
+to the virtual environment. A rotation of the head by an end user can
+radically affect the final view's orientation. In this situation, Java
+3D combines the position and orientation from the ViewPlatform with the
+position and orientation from the head tracker to form the view matrix.
+The view frustum, however, does not change since the user's eyes do not
+move relative to their respective display screen, so Java&nbsp;3D can
+compute the projection matrix once and cache the result.
+<p>If any of the parameters of a View object are updated, this will
+effect
+a change in the implicit viewing transform (and thus image) of any
+Canvas3D that references that View object.
+</p>
+<p>
+</p>
+<h2>Compatibility Mode</h2>
+<p>A camera-based view model allows application programmers to think
+about
+the images displayed on the computer screen as if a virtual camera took
+those images. Such a view model allows application programmers to
+position and orient a virtual camera within a virtual scene, to
+manipulate some parameters of the virtual camera's lens (specify its
+field of view), and to specify the locations of the near and far
+clipping planes.
+</p>
+<p>Java&nbsp;3D allows applications to enable compatibility mode for
+room-mounted, non-head-tracked display environments or to disable
+compatibility mode using the following methods. Camera-based viewing
+functions are available only in compatibility mode. The <code>setCompatibilityModeEnable</code>
+method turns compatibility mode on or off. Compatibility mode is
+disabled by default.
+</p>
+<hr noshade="noshade">
+<p><b>Note:</b> Use of these view-compatibility
+functions will disable some of Java&nbsp;3D's view model features and
+limit
+the portability of Java&nbsp;3D programs. These methods are primarily
+intended to help jump-start porting of existing applications.
+</p>
+<hr noshade="noshade">
+<h3>Overview of the
+Camera-Based View Model</h3>
+The traditional camera-based view model, shown in <a href="#Figure_11">Figure
+11</a>,
+places a virtual camera inside a geometrically specified world. The
+camera "captures" the view from its current location, orientation, and
+perspective. The visualization system then draws that view on the
+user's display device. The application controls the view by moving the
+virtual camera to a new location, by changing its orientation, by
+changing its field of view, or by controlling some other camera
+parameter.
+<p>The various parameters that users control in a
+camera-based view model specify the shape of a viewing volume (known as
+a frustum because of its truncated pyramidal shape) and locate that
+frustum within the virtual environment. The rendering pipeline uses the
+frustum to decide which objects to draw on the display screen. The
+rendering pipeline does not draw objects outside the view frustum, and
+it clips (partially draws) objects that intersect the frustum's
+boundaries.
+</p>
+<p>Though a view frustum's specification may have many items in common
+with those of a physical camera, such as placement, orientation, and
+lens settings, some frustum parameters have no physical analog. Most
+noticeably, a frustum has two parameters not found on a physical
+camera: the near and far clipping planes.
+</p>
+<p><a name="Figure_11"></a><img style="width: 500px; height: 202px;"
+ alt="Camera-Based View Model" title="Camera-Based View Model"
+ src="ViewModel11.gif">
+</p>
+<p>
+</p>
+<ul>
+ <font size="-1"><b><i>Figure 11</i> &#8211; The Camera-Based View Model</b></font>
+</ul>
+<p>
+The location of the near and far clipping planes allows the application
+programmer to specify which objects Java&nbsp;3D should not draw.
+Objects
+too far away from the current eyepoint usually do not result in
+interesting images. Those too close to the eyepoint might obscure the
+interesting objects. By carefully specifying near and far clipping
+planes, an application programmer can control which objects the
+renderer will not be drawing.
+</p>
+<p>From the perspective of the display device, the virtual camera's
+image
+plane corresponds to the display screen. The camera's placement,
+orientation, and field of view determine the shape of the view frustum.
+</p>
+<p>
+</p>
+<h3>Using the Camera-Based View
+Model</h3>
+<p>The camera-based view model allows Java&nbsp;3D to bridge the gap
+between
+existing 3D code and Java&nbsp;3D's view model. By using the
+camera-based
+view model methods, a programmer retains the familiarity of the older
+view model but gains some of the flexibility afforded by Java&nbsp;3D's
+new
+view model.
+</p>
+<p>The traditional camera-based view model is supported in Java&nbsp;3D
+by
+helping methods in the Transform3D object. These methods were
+explicitly designed to resemble as closely as possible the view
+functions of older packages and thus should be familiar to most 3D
+programmers. The resulting Transform3D objects can be used to set
+compatibility-mode transforms in the View object.
+</p>
+<p>
+</p>
+<h4>Creating a Viewing Matrix</h4>
+<p>The Transform3D object provides a <code>lookAt</code> utility
+method
+to create a
+viewing matrix. This method specifies the position and orientation of
+a viewing transform. It works similarly to the equivalent function in
+OpenGL. The inverse of this transform can be used to control the
+ViewPlatform object within the scene graph. Alternatively, this
+transform can be passed directly to the View's <code>VpcToEc</code>
+transform via the compatibility-mode viewing functions. The <code>setVpcToEc</code>
+method is used to set the viewing matrix when in compatibility mode.
+</p>
+<h4>Creating a Projection
+Matrix</h4>
+<p>The Transform3D object provides three methods for
+creating a projection matrix: <code>frustum</code>, <code>perspective</code>,
+and <code>ortho</code>. All three map points from eye coordinates
+(EC) to clipping coordinates (CC). Eye coordinates are defined such
+that (0, 0, 0) is at the eye and the projection plane is at <em>z</em>
+= -1.<br>
+</p>
+<p>The <code>frustum</code> method
+establishes a perspective projection with the eye at the apex of a
+symmetric view frustum. The transform maps points from eye coordinates
+to clipping coordinates. The clipping coordinates generated by the
+resulting transform are in a right-handed coordinate system (as are all
+other coordinate systems in Java&nbsp;3D).
+</p>
+<p>The arguments define the frustum and its associated perspective
+projection: <code>(left</code>, <code>bottom</code>, <code>-near)</code>
+and <code>(right</code>, <code>top</code>, <code>-near)</code>
+specify the point on the near clipping plane that maps onto the
+lower-left and upper-right corners of the window, respectively. The <code>-far</code>
+parameter specifies the far clipping plane. See <a href="#Figure_12">Figure
+12</a>.
+</p>
+<p>The <code>perspective</code> method establishes a perspective
+projection with the eye at the apex of a symmetric view frustum,
+centered about the <em>Z</em>-axis,
+with a fixed field of view. The resulting perspective projection
+transform mimics a standard camera-based view model. The transform maps
+points from eye coordinates to clipping coordinates. The clipping
+coordinates generated by the resulting transform are in a right-handed
+coordinate system.
+</p>
+<p>The arguments define the frustum and its associated perspective
+projection: <code>-near</code> and <code>-far</code> specify the near
+and far clipping planes; <code>fovx</code> specifies the field of view
+in the <em>X</em> dimension, in radians; and <code>aspect</code>
+specifies the aspect ratio of the window. See <a href="#Figure_13">Figure
+13</a>.
+</p>
+<p><a name="Figure_12"></a><img style="width: 500px; height: 209px;"
+ alt="Perspective Viewing Frustum" title="Perspective Viewing Frustum"
+ src="ViewModel12.gif">
+</p>
+<p>
+</p>
+<ul>
+ <font size="-1"><b><i>Figure 12</i> &#8211; A Perspective Viewing Frustum</b></font>
+</ul>
+<p>
+<a name="Figure_13"></a><img style="width: 500px; height: 212px;"
+ alt="Perspective View Model Arguments"
+ title="Perspective View Model Arguments" src="ViewModel13.gif"></p>
+<p>
+</p>
+<ul>
+ <font size="-1"><b><i>Figure 13</i> &#8211; Perspective View Model Arguments</b></font>
+</ul>
+<p>
+The <code>ortho</code> method
+establishes a parallel projection. The orthographic projection
+transform mimics a standard camera-based view model. The transform
+maps points from eye coordinates to clipping coordinates. The clipping
+coordinates generated by the resulting transform are in a right-handed
+coordinate system.
+</p>
+<p>The arguments define a rectangular box used for projection: <code>(left</code>,
+<code>bottom</code>, <code>-near)</code> and <code>(right</code>, <code>top</code>,
+<code>-near)</code>
+specify the point on the near clipping plane that maps onto the
+lower-left and upper-right corners of the window, respectively. The <code>-far</code>
+parameter specifies the far clipping plane. See <a href="#Figure_14">Figure
+14</a>.
+</p>
+<p><a name="Figure_14"></a><img style="width: 500px; height: 220px;"
+ alt="Orthographic View Model" title="Orthographic View Model"
+ src="ViewModel14.gif">
+</p>
+<p>
+</p>
+<ul>
+ <font size="-1"><b><i>Figure 14</i> &#8211; Orthographic View Model</b></font>
+</ul>
+<p>
+</p>
+<p>The <code>setLeftProjection</code>
+and <code>setRightProjection</code> methods are used to set the
+projection matrices for the left eye and right eye, respectively, when
+in compatibility mode.</p>
</body>
</html>
diff --git a/src/classes/share/javax/media/j3d/doc-files/ViewModel1.gif b/src/classes/share/javax/media/j3d/doc-files/ViewModel1.gif
new file mode 100644
index 0000000..e94743e
--- /dev/null
+++ b/src/classes/share/javax/media/j3d/doc-files/ViewModel1.gif
Binary files differ
diff --git a/src/classes/share/javax/media/j3d/doc-files/ViewModel10.gif b/src/classes/share/javax/media/j3d/doc-files/ViewModel10.gif
new file mode 100644
index 0000000..aceb6e7
--- /dev/null
+++ b/src/classes/share/javax/media/j3d/doc-files/ViewModel10.gif
Binary files differ
diff --git a/src/classes/share/javax/media/j3d/doc-files/ViewModel11.gif b/src/classes/share/javax/media/j3d/doc-files/ViewModel11.gif
new file mode 100644
index 0000000..f943c15
--- /dev/null
+++ b/src/classes/share/javax/media/j3d/doc-files/ViewModel11.gif
Binary files differ
diff --git a/src/classes/share/javax/media/j3d/doc-files/ViewModel12.gif b/src/classes/share/javax/media/j3d/doc-files/ViewModel12.gif
new file mode 100644
index 0000000..787afe7
--- /dev/null
+++ b/src/classes/share/javax/media/j3d/doc-files/ViewModel12.gif
Binary files differ
diff --git a/src/classes/share/javax/media/j3d/doc-files/ViewModel13.gif b/src/classes/share/javax/media/j3d/doc-files/ViewModel13.gif
new file mode 100644
index 0000000..a8482ef
--- /dev/null
+++ b/src/classes/share/javax/media/j3d/doc-files/ViewModel13.gif
Binary files differ
diff --git a/src/classes/share/javax/media/j3d/doc-files/ViewModel14.gif b/src/classes/share/javax/media/j3d/doc-files/ViewModel14.gif
new file mode 100644
index 0000000..f201443
--- /dev/null
+++ b/src/classes/share/javax/media/j3d/doc-files/ViewModel14.gif
Binary files differ
diff --git a/src/classes/share/javax/media/j3d/doc-files/ViewModel2.gif b/src/classes/share/javax/media/j3d/doc-files/ViewModel2.gif
new file mode 100644
index 0000000..2d549b1
--- /dev/null
+++ b/src/classes/share/javax/media/j3d/doc-files/ViewModel2.gif
Binary files differ
diff --git a/src/classes/share/javax/media/j3d/doc-files/ViewModel3.gif b/src/classes/share/javax/media/j3d/doc-files/ViewModel3.gif
new file mode 100644
index 0000000..5285015
--- /dev/null
+++ b/src/classes/share/javax/media/j3d/doc-files/ViewModel3.gif
Binary files differ
diff --git a/src/classes/share/javax/media/j3d/doc-files/ViewModel4.gif b/src/classes/share/javax/media/j3d/doc-files/ViewModel4.gif
new file mode 100644
index 0000000..ab9db1d
--- /dev/null
+++ b/src/classes/share/javax/media/j3d/doc-files/ViewModel4.gif
Binary files differ
diff --git a/src/classes/share/javax/media/j3d/doc-files/ViewModel5.gif b/src/classes/share/javax/media/j3d/doc-files/ViewModel5.gif
new file mode 100644
index 0000000..859b456
--- /dev/null
+++ b/src/classes/share/javax/media/j3d/doc-files/ViewModel5.gif
Binary files differ
diff --git a/src/classes/share/javax/media/j3d/doc-files/ViewModel6.gif b/src/classes/share/javax/media/j3d/doc-files/ViewModel6.gif
new file mode 100644
index 0000000..2200595
--- /dev/null
+++ b/src/classes/share/javax/media/j3d/doc-files/ViewModel6.gif
Binary files differ
diff --git a/src/classes/share/javax/media/j3d/doc-files/ViewModel7.gif b/src/classes/share/javax/media/j3d/doc-files/ViewModel7.gif
new file mode 100644
index 0000000..ec84ac2
--- /dev/null
+++ b/src/classes/share/javax/media/j3d/doc-files/ViewModel7.gif
Binary files differ
diff --git a/src/classes/share/javax/media/j3d/doc-files/ViewModel8.gif b/src/classes/share/javax/media/j3d/doc-files/ViewModel8.gif
new file mode 100644
index 0000000..ee4b331
--- /dev/null
+++ b/src/classes/share/javax/media/j3d/doc-files/ViewModel8.gif
Binary files differ
diff --git a/src/classes/share/javax/media/j3d/doc-files/ViewModel9.gif b/src/classes/share/javax/media/j3d/doc-files/ViewModel9.gif
new file mode 100644
index 0000000..0cbf72c
--- /dev/null
+++ b/src/classes/share/javax/media/j3d/doc-files/ViewModel9.gif
Binary files differ
diff --git a/src/classes/share/javax/media/j3d/doc-files/intro.html b/src/classes/share/javax/media/j3d/doc-files/intro.html
index 3609caf..d981179 100644
--- a/src/classes/share/javax/media/j3d/doc-files/intro.html
+++ b/src/classes/share/javax/media/j3d/doc-files/intro.html
@@ -203,9 +203,10 @@ graph is a subgraph that is rooted by a BranchGroup node that is
attached to the superstructure. For more information, see "<a
href="SceneGraphOverview.html">Scene Graph Basics</a>."
</p>
-<p>
-<img style="width: 500px; height: 263px;" alt="Application scene graph"
- title="Application scene graph" src="intro.gif"></p>
+<p><a name="Figure_1"></a><img style="width: 500px; height: 263px;"
+ alt="Application
+scene graph" title="Application scene graph"
+ src="intro.gif"></p>
<p>
</p>
<ul>