diff --git a/hunks/opencv/clipbycolor.js b/hunks/opencv/clipbycolor.js
index 5408491730cd6f1829f081ca7c97f0175cd5bc26..9160490a8a7f479489ea30b927206dec752ae49a 100644
--- a/hunks/opencv/clipbycolor.js
+++ b/hunks/opencv/clipbycolor.js
@@ -14,7 +14,7 @@ function OCVClipByColor() {
   Hunkify(this)
 
   // coupla globals,
-  let low, high, clipped
+  let low, high, clipped, kernel
   let x = 320
   let y = 240
 
@@ -36,6 +36,8 @@ function OCVClipByColor() {
     high = new cv.Mat(y, x, cv.CV_8UC4, [varr[1], varr[3], varr[5], 255])
     // also,
     clipped = new cv.Mat(y, x, cv.CV_8UC1)
+    // and ah kernel
+    kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, new cv.Size(15, 15))
     // ok,
     if (setState) boundState.set(str)
   }
@@ -76,7 +78,13 @@ function OCVClipByColor() {
   this.loop = () => {
     if (!matOutput.io && matInput.io && go) {
       cv.inRange(matInput.get(), low, high, clipped)
+      // also the kernelz
+      cv.morphologyEx(clipped, clipped, cv.MORPH_CLOSE, kernel)
+      cv.morphologyEx(clipped, clipped, cv.MORPH_OPEN, kernel)
+      // and then we
       cv.imshow('threshid', clipped)
+      // and,
+      matOutput.put(clipped)
     }
   }
 
diff --git a/hunks/opencv/contourcenters.js b/hunks/opencv/contourcenters.js
new file mode 100644
index 0000000000000000000000000000000000000000..4db912e6b238b093fc55d4906aee418e108b3b6e
--- /dev/null
+++ b/hunks/opencv/contourcenters.js
@@ -0,0 +1,123 @@
+import {
+  Hunkify,
+  Input,
+  Output,
+  State
+} from '../hunks.js'
+
+import {
+  loadOpenCv,
+  getWebcam
+} from '../../libs/opencvwrap.js'
+
+function OCVContourCenters() {
+  Hunkify(this)
+
+  // coupla globals,
+  let dsp, hierarchy, contours, dcolour, c1, c2, e1, e2
+  let x = 320
+  let y = 240
+
+  // number of contours to track (pairs of x/y center outputs),
+  let numContours = new State('number', 'numContours', 2)
+  // could do set-size here also, or should,
+  let resetMatricies = (num) => {
+    // set canvas,
+    $(canvas).width(x).height(y)
+    this.requestResize(x, y)
+
+    dsp = new cv.Mat(y, x, cv.CV_8UC4)
+    hierarchy = new cv.Mat()
+    contours = new cv.MatVector()
+    dcolour = new cv.Scalar(255, 0, 0, 255)
+    // eh,
+  }
+  this.states.push(numContours)
+
+  // input,
+  let matInput = new Input('reference', '(ocvmat) in', this)
+  let layInput = new Input('reference', '(ocvmat) underlay', this)
+  this.inputs.push(matInput, layInput)
+
+  let xOne = new Output('number', 'x1', this)
+  let yOne = new Output('number', 'y1', this)
+  let xTwo = new Output('number', 'x2', this)
+  let yTwo = new Output('number', 'y2', this)
+  this.outputs.push(xOne, yOne, xTwo, yTwo)
+
+  let allClear = () => {
+    if (xOne.io || yOne.io || xTwo.io || yTwo.io) {
+      return false
+    } else {
+      return true
+    }
+  }
+
+  this.init = () => {
+    //
+  }
+
+  this.dom = $('<div>')
+  let canvas = $('<canvas>')
+  let go = false
+
+  this.onload = () => {
+    $(this.dom).append(canvas)
+    $(canvas).attr('id', 'centid')
+    // important to do this after cv is available,
+    loadOpenCv().then(() => {
+      resetMatricies(numContours.value)
+      go = true
+    })
+  }
+
+  let drawCross = (mat, x, y, size) => {
+    let halflen = size / 2
+    cv.line(mat, {x: x - halflen, y: y}, {x: x + halflen, y: y}, [255,0,0,255], 1, cv.LINE_AA)
+    cv.line(mat, {x: x, y:y - halflen}, {x: x, y: y + halflen}, [255,0,0,255], 1, cv.LINE_AA)
+    //console.log('xy', x, y)
+  }
+
+  this.loop = () => {
+    if(!go) return
+    if (layInput.io) {
+      dsp = layInput.get().clone()
+    }
+    if (allClear() && matInput.io) {
+      cv.findContours(matInput.get(), contours, hierarchy, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE)
+      cv.drawContours(dsp, contours, 0, dcolour, 0, cv.LINE_8, hierarchy, 100)
+      cv.drawContours(dsp, contours, 1, dcolour, 0, cv.LINE_8, hierarchy, 100)
+      try {
+        // ok, do two,
+        let c1, c2
+        c1 = contours.get(0)
+        c2 = contours.get(1)
+        if(c1 && c2){
+          e1 = cv.fitEllipse(c1)
+          if(e1){
+            drawCross(dsp, e1.center.x, e1.center.y, 10)
+            if(!xOne.io && !yOne.io){
+              xOne.put(e1.center.x)
+              yOne.put(e1.center.y)
+            }
+          }
+          e2 = cv.fitEllipse(c2)
+          if(e2){
+            drawCross(dsp, e2.center.x, e2.center.y, 10)
+            if(!xTwo.io && !yTwo.io){
+              xTwo.put(e2.center.x)
+              yTwo.put(e2.center.y)
+            }
+          }
+        }
+
+      } catch (err) {
+        //
+      }
+      cv.imshow('centid', dsp)
+    }
+  }
+
+}
+
+export default OCVContourCenters
diff --git a/hunks/opencv/webcam.js b/hunks/opencv/webcam.js
index 948b10bda37e90008f6c6a4861c6f1d824827713..4162d112d447cc0083ea15a7f8b15b5423405a40 100644
--- a/hunks/opencv/webcam.js
+++ b/hunks/opencv/webcam.js
@@ -88,6 +88,7 @@ function OCVWebcam() {
       //$(this.dom).append(videoElement)
       cam = new cv.VideoCapture(videoElement)
       dst = new cv.Mat(y, x, cv.CV_8UC4)
+      go = true
     })
   }
 
@@ -96,9 +97,10 @@ function OCVWebcam() {
   }
 
   let timeout = true
+  let go = false
 
   this.loop = () => {
-    if (cam && cv && !matOutput.io && timeout) {
+    if (cam && cv && !matOutput.io && timeout && go) {
       // read the cam
       cam.read(dst)
       // show the slice
diff --git a/libs/opencvwrap.js b/libs/opencvwrap.js
index 425f26e2ea5e4bd1fa386c44b9279ab4ea84b33b..ff0f824632309ebafd966cfd17553ff1929598d3 100644
--- a/libs/opencvwrap.js
+++ b/libs/opencvwrap.js
@@ -2,15 +2,22 @@
 
 const OPENCV_URL = 'libs/opencv.js'
 
+window.cvgo = false
+window.cvloading = false
+window.cvresolvers = []
+
 function loadOpenCv() {
   return new Promise((resolve, reject) => {
-    try{
-      if(cv){
-        console.log('existing cv, thx')
-        resolve()
-      }
-    } catch (err) {
-      console.log('loading cv, ok')
+    console.log('CVLOAD: begin', window.cvgo, window.cvloading, window.cvresolvers.length)
+    if (window.cvgo && !window.cvloading && cv) {
+      console.log('CVLOAD: here already')
+      resolve()
+    } else if (window.cvloading) {
+      console.log('CVLOAD: already loading, adding to resolve')
+      window.cvresolvers.push(resolve)
+    } else {
+      console.log('CVLOAD: loading cv, ok')
+      window.cvloading = true
       let script = document.createElement('script')
       script.setAttribute('async', '')
       script.setAttribute('type', 'text/javascript')
@@ -18,10 +25,18 @@ function loadOpenCv() {
         // ocv drops 'cv' in to toplevel documeeeent yikes
         // let's see if we can just stash this local copy,
         // and shell it out ...
+        console.log('CVLOAD: loaded')
+        window.cvgo = true
+        window.cvloading = false
         resolve()
+        if(window.cvresolvers.length > 0){
+          for(let res of window.cvresolvers){
+            res()
+          }
+        }
       })
       script.addEventListener('error', (err) => {
-        console.error('failed to load script', err)
+        console.error('CVLOAD: failed to load script', err)
         reject(err)
       })
       script.src = OPENCV_URL
@@ -31,7 +46,7 @@ function loadOpenCv() {
   })
 }
 
-function getWebcam(w, h){
+function getWebcam(w, h) {
   return new Promise((resolve, reject) => {
     let video = document.createElement('video')
     video.width = w
@@ -50,4 +65,7 @@ function getWebcam(w, h){
   })
 }
 
-export { loadOpenCv, getWebcam }
+export {
+  loadOpenCv,
+  getWebcam
+}
diff --git a/save/contexts/cuttlefish/cvBalltrack.json b/save/contexts/cuttlefish/cvBalltrack.json
new file mode 100644
index 0000000000000000000000000000000000000000..62c9742aa9ab7d2b25b8589ad0d965fdf0b9f210
--- /dev/null
+++ b/save/contexts/cuttlefish/cvBalltrack.json
@@ -0,0 +1,153 @@
+{
+  "interpreterName": "cuttlefish",
+  "interpreterVersion": "v0.1",
+  "hunks": [
+    {
+      "type": "manager",
+      "name": "nrol",
+      "inputs": [
+        {
+          "name": "msgs",
+          "type": "byteArray"
+        }
+      ],
+      "outputs": [
+        {
+          "name": "msgs",
+          "type": "byteArray",
+          "connections": [
+            {
+              "inHunkIndex": 1,
+              "inHunkInput": 0
+            }
+          ]
+        }
+      ],
+      "states": []
+    },
+    {
+      "type": "view",
+      "name": "tlview",
+      "inputs": [
+        {
+          "name": "msgs",
+          "type": "byteArray"
+        }
+      ],
+      "outputs": [
+        {
+          "name": "msgs",
+          "type": "byteArray",
+          "connections": [
+            {
+              "inHunkIndex": 0,
+              "inHunkInput": 0
+            }
+          ]
+        }
+      ],
+      "states": []
+    },
+    {
+      "type": "opencv/webcam",
+      "name": "opencv/webcam_2",
+      "inputs": [],
+      "outputs": [
+        {
+          "name": "(ocvmat) capture",
+          "type": "reference",
+          "connections": [
+            {
+              "inHunkIndex": 3,
+              "inHunkInput": 0
+            },
+            {
+              "inHunkIndex": 4,
+              "inHunkInput": 1
+            }
+          ]
+        }
+      ],
+      "states": [
+        {
+          "name": "dimensions",
+          "type": "string",
+          "value": "320, 240"
+        },
+        {
+          "name": "delay",
+          "type": "number",
+          "value": 100
+        }
+      ]
+    },
+    {
+      "type": "opencv/clipbycolor",
+      "name": "opencv/clipbycolor_3",
+      "inputs": [
+        {
+          "name": "(ocvmat) in",
+          "type": "reference"
+        }
+      ],
+      "outputs": [
+        {
+          "name": "(ocvmat) out",
+          "type": "reference",
+          "connections": [
+            {
+              "inHunkIndex": 4,
+              "inHunkInput": 0
+            }
+          ]
+        }
+      ],
+      "states": [
+        {
+          "name": "inRange",
+          "type": "string",
+          "value": "50, 255, 0, 50, 0, 50"
+        }
+      ]
+    },
+    {
+      "type": "opencv/contourcenters",
+      "name": "opencv/contourcenters_4",
+      "inputs": [
+        {
+          "name": "(ocvmat) in",
+          "type": "reference"
+        },
+        {
+          "name": "(ocvmat) underlay",
+          "type": "reference"
+        }
+      ],
+      "outputs": [
+        {
+          "name": "x1",
+          "type": "number"
+        },
+        {
+          "name": "y1",
+          "type": "number"
+        },
+        {
+          "name": "x2",
+          "type": "number"
+        },
+        {
+          "name": "y2",
+          "type": "number"
+        }
+      ],
+      "states": [
+        {
+          "name": "numContours",
+          "type": "number",
+          "value": 2
+        }
+      ]
+    }
+  ]
+}
\ No newline at end of file
diff --git a/scratch/opencv/README.md b/scratch/opencv/README.md
index f510bd97fea6d69ec04cee78e08dd572c98643df..dde4286b67c618bc6b007cc2262461ef68739769 100644
--- a/scratch/opencv/README.md
+++ b/scratch/opencv/README.md
@@ -65,15 +65,16 @@ Great, I've gotten this up to finding the center point of a red thing. Here's my
 
 ![targ](doc/2019-06-17-ocv-targets-aqc.png)
 
-## Real Implementation Notes
+## CF meets CV
 
- - more better markers
+![cfcv](doc/2019-06-17-ocv-in-cuttlefish.png)
 
- - how to import cv into cuttlefish space, checking for existence so that we
- - don't go twice?
+This is OK. It slows the CF DOM down some - pans and zooms have a noticeable 'tick' on frames. Still, the only way to learn how to do performant computing is to actually try to do difficult things.
 
- - webcam hunk, (ocvmat) type output
- - inrange/kernal for (state) between
- - centroid/contours, outputting (some #) of xy pixel float (change # outputs)
- - log'em or something
- - setup machine etc
+I'm up to where I was at with the demo, now in cf. Next is outputting some values.
+
+A final roll would see well-configurable state machines for those hunks: i.e. variable #s of outputs, variable frame sizes. Lots to handle: easier is to just take simple cases on first...
+
+Might try to roll in some charts as well, I figure. Maybe: take the difference of two variables, scale to some #, plot that.
+
+There's a small hardware trick to pull also... and I'd like to sleep tonight, so... 
diff --git a/scratch/opencv/doc/2019-06-17-ocv-in-cuttlefish.png b/scratch/opencv/doc/2019-06-17-ocv-in-cuttlefish.png
new file mode 100644
index 0000000000000000000000000000000000000000..fcab6975eb236c060b1b41de60391b3f63333127
Binary files /dev/null and b/scratch/opencv/doc/2019-06-17-ocv-in-cuttlefish.png differ