Source Code for Module omeroweb.webtest.views

from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response
from omeroweb.webgateway.views import getBlitzConnection, _session_logout
from omeroweb.webgateway import views as webgateway_views
from omeroweb.webclient.views import isUserConnected

from webtest_utils import getSpimData
from cStringIO import StringIO

import settings
import logging
import traceback
import omero
from omero.rtypes import rint, rstring
import omero.gateway

logger = logging.getLogger('webtest')


try:
    import Image
except: #pragma: nocover
    try:
        from PIL import Image
    except:
        logger.error('No PIL installed, line plots and split channel will fail!')

@isUserConnected    # wrapper handles login (or redirects to webclient login). Connection passed in **kwargs
def dataset(request, datasetId, **kwargs):
    """ 'Hello World' example from tutorial on http://trac.openmicroscopy.org.uk/ome/wiki/OmeroWeb """
    conn = kwargs['conn']
    ds = conn.getObject("Dataset", datasetId)     # before OMERO 4.3 this was conn.getDataset(datasetId)
    return render_to_response('webtest/dataset.html', {'dataset': ds})     # generate html from template
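
# Illustrative usage only: a urls.py entry of roughly this shape (the regex and
# the 'webtest_dataset' name are assumptions, not taken from this module's actual
# urls.py) would route a request such as /webtest/dataset/51/ to the view above:
#
#   from django.conf.urls.defaults import patterns, url
#   urlpatterns = patterns('omeroweb.webtest.views',
#       url(r'^dataset/(?P<datasetId>[0-9]+)/$', 'dataset', name='webtest_dataset'),
#   )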

def login (request):
    """
    Attempts to get a connection to the server by calling L{omeroweb.webgateway.views.getBlitzConnection} with the 'request'
    object. If a connection is created, the user is directed to the 'webtest_index' page.
    If a connection is not created, this method returns a login page.

    @param request:     The django http request
    @return:            The http response - webtest_index or login page
    """
    if request.method == 'POST' and request.REQUEST['server']:
        blitz = settings.SERVER_LIST.get(pk=request.REQUEST['server'])
        request.session['server'] = blitz.id
        request.session['host'] = blitz.host
        request.session['port'] = blitz.port

    conn = getBlitzConnection (request, useragent="OMERO.webtest")
    logger.debug(conn)
    if conn is not None:
        return HttpResponseRedirect(reverse('webtest_index'))
    return render_to_response('webtest/login.html', {'gw': settings.SERVER_LIST})

def logout (request):
    _session_logout(request, request.session['server'])
    try:
        del request.session['username']
    except KeyError:
        logger.error(traceback.format_exc())
    try:
        del request.session['password']
    except KeyError:
        logger.error(traceback.format_exc())

    #request.session.set_expiry(1)
    return HttpResponseRedirect(reverse('webtest_index'))

@isUserConnected    # wrapper handles login (or redirects to webclient login). Connection passed in **kwargs
def index(request, **kwargs):
    conn = kwargs['conn']
    return render_to_response('webtest/index.html', {'conn': conn})

@isUserConnected
def channel_overlay_viewer(request, imageId, **kwargs):
    """
    Viewer for overlaying separate channels from the same image or different images
    and adjusting horizontal and vertical alignment of each
    """
    conn = kwargs['conn']

    image = conn.getObject("Image", imageId)
    default_z = image.getSizeZ()/2

    # try to work out which channels should be 'red', 'green', 'blue' based on rendering settings
    red = None
    green = None
    blue = None
    notAssigned = []
    channels = []
    for i, c in enumerate(image.getChannels()):
        channels.append( {'name': c.getName()} )
        if c.getColor().getRGB() == (255, 0, 0) and red == None:
            red = i
        elif c.getColor().getRGB() == (0, 255, 0) and green == None:
            green = i
        elif c.getColor().getRGB() == (0, 0, 255) and blue == None:
            blue = i
        else:
            notAssigned.append(i)
    # any not assigned - try assigning
    for i in notAssigned:
        if red == None: red = i
        elif green == None: green = i
        elif blue == None: blue = i

    # see if we have z, x, y offsets already annotated on this image
    # added by javascript in viewer. E.g. 0|z:1_x:0_y:0,1|z:0_x:10_y:0,2|z:0_x:0_y:0
    ns = "omero.web.channel_overlay.offsets"
    comment = image.getAnnotation(ns)
    if comment == None:      # maybe offset comment has been added manually (no ns)
        for ann in image.listAnnotations():
            if isinstance(ann, omero.gateway.CommentAnnotationWrapper):
                if ann.getValue().startswith("0|z:"):
                    comment = ann
                    break
    if comment != None:
        offsets = comment.getValue()
        for o in offsets.split(","):
            index, zxy = o.split("|", 1)
            if int(index) < len(channels):
                keyVals = zxy.split("_")
                for kv in keyVals:
                    key, val = kv.split(":")
                    if key == "z": val = int(val) + default_z
                    channels[int(index)][key] = int(val)

    return render_to_response('webtest/demo_viewers/channel_overlay_viewer.html', {
        'image': image, 'channels': channels, 'default_z': default_z, 'red': red, 'green': green, 'blue': blue})
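
# Illustrative example of the offsets annotation parsed above (all values are
# hypothetical): for a 3-channel image with sizeZ 20 (so default_z == 10), a
# comment of "0|z:1_x:0_y:0,1|z:0_x:10_y:0,2|z:0_x:0_y:0" would populate:
#
#   channels[0] -> {'name': ..., 'z': 11, 'x': 0,  'y': 0}
#   channels[1] -> {'name': ..., 'z': 10, 'x': 10, 'y': 0}
#   channels[2] -> {'name': ..., 'z': 10, 'x': 0,  'y': 0}
#
# i.e. the stored 'z' offsets are relative and are converted to absolute Z
# indices by adding default_z.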

@isUserConnected
def render_channel_overlay (request, **kwargs):
    """
    Overlays separate channels (red, green, blue) from the same image or different images,
    manipulating each independently (translate, scale, rotate etc? )
    """
    conn = kwargs['conn']

    # request holds info on all the planes we are working on and offset (may not all be visible)
    # planes=0|imageId:z:c:t$x:shift_y:shift_rot:etc,1|imageId...
    # E.g. planes=0|2305:7:0:0$x:-50_y:10,1|2305:7:1:0,2|2305:7:2:0&red=2&blue=0&green=1
    planes = {}
    p = request.REQUEST.get('planes', None)
    for plane in p.split(','):
        infoMap = {}
        plane_info = plane.split('|')
        key = plane_info[0].strip()
        info = plane_info[1].strip()
        shift = None
        if info.find('$') >= 0:
            info, shift = info.split('$')
        imageId, z, c, t = [int(i) for i in info.split(':')]
        infoMap['imageId'] = imageId
        infoMap['z'] = z
        infoMap['c'] = c
        infoMap['t'] = t
        if shift != None:
            for kv in shift.split("_"):
                k, v = kv.split(":")
                infoMap[k] = v
        planes[key] = infoMap

    # from the request we need to know which plane is blue, green, red (if any) by index
    # E.g. red=0&green=2
    red = request.REQUEST.get('red', None)
    green = request.REQUEST.get('green', None)
    blue = request.REQUEST.get('blue', None)

    # kinda like split-view: we want to get single-channel images...
    # red...
    redImg = None

    def translate(image, deltaX, deltaY):
        xsize, ysize = image.size
        mode = image.mode
        bg = Image.new(mode, image.size)
        x = abs(min(deltaX, 0))
        pasteX = max(0, deltaX)
        y = abs(min(deltaY, 0))
        pasteY = max(0, deltaY)

        part = image.crop((x, y, xsize-deltaX, ysize-deltaY))
        bg.paste(part, (pasteX, pasteY))
        return bg

    def getPlane(planeInfo):
        """ Returns the rendered plane split into a single channel (ready for merging) """
        img = conn.getObject("Image", planeInfo['imageId'])
        img.setActiveChannels((planeInfo['c']+1,))
        img.setGreyscaleRenderingModel()
        rgb = img.renderImage(planeInfo['z'], planeInfo['t'])
        r, g, b = rgb.split()   # go from RGB to L

        x, y = 0, 0
        if 'x' in planeInfo:
            x = int(planeInfo['x'])
        if 'y' in planeInfo:
            y = int(planeInfo['y'])

        if x or y:
            r = translate(r, x, y)
        return r

    redChannel = None
    greenChannel = None
    blueChannel = None
    if red != None and red in planes:
        redChannel = getPlane(planes[red])
    if green != None and green in planes:
        greenChannel = getPlane(planes[green])
    if blue != None and blue in planes:
        blueChannel = getPlane(planes[blue])

    if redChannel != None:
        size = redChannel.size
    elif greenChannel != None:
        size = greenChannel.size
    elif blueChannel != None:
        size = blueChannel.size

    black = Image.new('L', size)
    redChannel = redChannel and redChannel or black
    greenChannel = greenChannel and greenChannel or black
    blueChannel = blueChannel and blueChannel or black

    merge = Image.merge("RGB", (redChannel, greenChannel, blueChannel))
    # convert from PIL back to string image data
    rv = StringIO()
    compression = 0.9
    merge.save(rv, 'jpeg', quality=int(compression*100))
    jpeg_data = rv.getvalue()

    rsp = HttpResponse(jpeg_data, mimetype='image/jpeg')
    return rsp
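
# Illustrative request for the view above (image ID and shifts come from the
# example comment in the code; they are placeholder values):
#
#   ?planes=0|2305:7:0:0$x:-50_y:10,1|2305:7:1:0,2|2305:7:2:0&red=2&green=1&blue=0
#
# parses into
#
#   planes == {'0': {'imageId': 2305, 'z': 7, 'c': 0, 't': 0, 'x': '-50', 'y': '10'},
#              '1': {'imageId': 2305, 'z': 7, 'c': 1, 't': 0},
#              '2': {'imageId': 2305, 'z': 7, 'c': 2, 't': 0}}
#
# so the red channel of the returned JPEG comes from planes['2'], green from
# planes['1'] and blue from planes['0']; the 'x'/'y' shifts stay as strings
# until getPlane() converts them.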

@isUserConnected
def metadata (request, iid, **kwargs):
    from omeroweb.webclient.forms import MetadataFilterForm, MetadataDetectorForm, MetadataChannelForm, \
                        MetadataEnvironmentForm, MetadataObjectiveForm, MetadataStageLabelForm, \
                        MetadataLightSourceForm, MetadataDichroicForm, MetadataMicroscopeForm

    conn = kwargs['conn']

    form_environment = None
    form_objective = None
    form_microscope = None
    form_stageLabel = None
    form_filters = list()
    form_detectors = list()
    form_channels = list()
    form_lasers = list()

    image = conn.getObject("Image", iid)
    om = image.loadOriginalMetadata()
    global_metadata = sorted(om[1])
    series_metadata = sorted(om[2])

    for ch in image.getChannels():
        if ch.getLogicalChannel() is not None:
            channel = dict()
            channel['form'] = MetadataChannelForm(initial={'logicalChannel': ch.getLogicalChannel(),
                                    'illuminations': list(conn.getEnumerationEntries("IlluminationI")),
                                    'contrastMethods': list(conn.getEnumerationEntries("ContrastMethodI")),
                                    'modes': list(conn.getEnumerationEntries("AcquisitionModeI"))})
            channel['form_emission_filters'] = list()
            if ch.getLogicalChannel().getLightPath().copyEmissionFilters():
                for f in ch.getLogicalChannel().getLightPath().copyEmissionFilters():
                    channel['form_emission_filters'].append(MetadataFilterForm(initial={'filter': f,
                                    'types': list(conn.getEnumerationEntries("FilterTypeI"))}))
            channel['form_excitation_filters'] = list()
            if ch.getLogicalChannel().getLightPath().copyExcitationFilters():
                for f in ch.getLogicalChannel().getLightPath().copyExcitationFilters():
                    channel['form_excitation_filters'].append(MetadataFilterForm(initial={'filter': f,
                                    'types': list(conn.getEnumerationEntries("FilterTypeI"))}))

            if ch.getLogicalChannel().getDetectorSettings()._obj is not None:
                channel['form_detector_settings'] = MetadataDetectorForm(initial={
                                    'detectorSettings': ch.getLogicalChannel().getDetectorSettings(),
                                    'detector': ch.getLogicalChannel().getDetectorSettings().getDetector(),
                                    'types': list(conn.getEnumerationEntries("DetectorTypeI"))})
            if ch.getLogicalChannel().getLightSourceSettings()._obj is not None:
                channel['form_light_source'] = MetadataLightSourceForm(initial={
                                    'lightSource': ch.getLogicalChannel().getLightSourceSettings(),
                                    'types': list(conn.getEnumerationEntries("FilterTypeI")),
                                    'mediums': list(conn.getEnumerationEntries("LaserMediumI")),
                                    'pulses': list(conn.getEnumerationEntries("PulseI"))})
            if ch.getLogicalChannel().getFilterSet()._obj is not None and ch.getLogicalChannel().getFilterSet().getDichroic()._obj:
                channel['form_dichroic'] = MetadataDichroicForm(initial={
                                    'logicalchannel': ch.getLogicalChannel().getFilterSet().getDichroic()})
            channel['name'] = ch.getName()
            channel['color'] = ch.getColor().getHtml()
            form_channels.append(channel)

    if image.getObjectiveSettings() is not None:
        form_objective = MetadataObjectiveForm(initial={'objectiveSettings': image.getObjectiveSettings(),
                                'mediums': list(conn.getEnumerationEntries("MediumI")),
                                'immersions': list(conn.getEnumerationEntries("ImmersionI")),
                                'corrections': list(conn.getEnumerationEntries("CorrectionI"))})
    if image.getImagingEnvironment() is not None:
        form_environment = MetadataEnvironmentForm(initial={'image': image})
    if image.getStageLabel() is not None:
        form_stageLabel = MetadataStageLabelForm(initial={'image': image})

    if image.getInstrument() is not None:
        if image.getInstrument().getMicroscope() is not None:
            form_microscope = MetadataMicroscopeForm(initial={
                                'microscopeTypes': list(conn.getEnumerationEntries("MicroscopeTypeI")),
                                'microscope': image.getInstrument().getMicroscope()})

        if image.getInstrument().getFilters() is not None:
            filters = list(image.getInstrument().getFilters())
            for f in filters:
                form_filter = MetadataFilterForm(initial={'filter': f,
                                'types': list(conn.getEnumerationEntries("FilterTypeI"))})
                form_filters.append(form_filter)

        if image.getInstrument().getDetectors() is not None:
            detectors = list(image.getInstrument().getDetectors())
            for d in detectors:
                form_detector = MetadataDetectorForm(initial={'detectorSettings': None, 'detector': d,
                                'types': list(conn.getEnumerationEntries("DetectorTypeI"))})
                form_detectors.append(form_detector)

        if image.getInstrument().getLightSources() is not None:
            lasers = list(image.getInstrument().getLightSources())
            for l in lasers:
                form_laser = MetadataLightSourceForm(initial={'lightSource': l,
                                'types': list(conn.getEnumerationEntries("FilterTypeI")),
                                'mediums': list(conn.getEnumerationEntries("LaserMediumI")),
                                'pulses': list(conn.getEnumerationEntries("PulseI"))})
                form_lasers.append(form_laser)

    # Annotations #
    text_annotations = list()
    long_annotations = {'rate': 0.00, 'votes': 0}
    url_annotations = list()
    file_annotations = list()
    tag_annotations = list()

    from omero.model import CommentAnnotationI, LongAnnotationI, TagAnnotationI, FileAnnotationI

    for ann in image.listAnnotations():
        if isinstance(ann._obj, CommentAnnotationI):
            text_annotations.append(ann)
        elif isinstance(ann._obj, LongAnnotationI):
            long_annotations['votes'] += 1
            long_annotations['rate'] += int(ann.longValue)
        elif isinstance(ann._obj, FileAnnotationI):
            file_annotations.append(ann)
        elif isinstance(ann._obj, TagAnnotationI):
            tag_annotations.append(ann)

    txannSize = len(text_annotations)
    urlannSize = len(url_annotations)
    fileannSize = len(file_annotations)
    tgannSize = len(tag_annotations)
    if long_annotations['votes'] > 0:
        long_annotations['rate'] /= long_annotations['votes']

    return render_to_response('webtest/metadata.html', {
        'image': image,
        'text_annotations': text_annotations, 'txannSize': txannSize,
        'long_annotations': long_annotations,
        'url_annotations': url_annotations, 'urlannSize': urlannSize,
        'file_annotations': file_annotations, 'fileannSize': fileannSize,
        'tag_annotations': tag_annotations, 'tgannSize': tgannSize,
        'global_metadata': global_metadata, 'serial_metadata': series_metadata,
        'form_channels': form_channels, 'form_environment': form_environment,
        'form_objective': form_objective, 'form_microscope': form_microscope,
        'form_filters': form_filters, 'form_detectors': form_detectors,
        'form_lasers': form_lasers, 'form_stageLabel': form_stageLabel})
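
# For reference (structure follows directly from the channel loop above): each
# entry of form_channels is a dict that always carries 'form',
# 'form_emission_filters', 'form_excitation_filters', 'name' and 'color', plus
# 'form_detector_settings', 'form_light_source' and 'form_dichroic' only when
# the corresponding settings objects are loaded for that logical channel.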

@isUserConnected
def roi_viewer(request, roi_library, imageId, **kwargs):
    """
    Displays an image, using 'jquery.drawinglibrary.js' to draw ROIs on the image.
    """
    conn = kwargs['conn']

    image = conn.getObject("Image", imageId)
    default_z = image.getSizeZ()/2

    templates = {"processing": 'webtest/roi_viewers/processing_viewer.html',
                 "jquery": "webtest/roi_viewers/jquery_drawing.html",
                 "raphael": "webtest/roi_viewers/raphael_viewer.html"}

    template = templates[roi_library]

    return render_to_response(template, {'image': image, 'default_z': default_z})
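
# Note (follows from the templates dict above): the roi_library URL argument
# must be one of "processing", "jquery" or "raphael"; any other value raises a
# KeyError rather than falling back to a default viewer.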

@isUserConnected
def add_annotations (request, **kwargs):
    """
    Creates a L{omero.gateway.CommentAnnotationWrapper} and adds it to the images according
    to variables in the http request.

    @param request:     The django L{django.core.handlers.wsgi.WSGIRequest}
                            - imageIds: A comma-delimited list of image IDs
                            - comment: The text to add as a comment to the images
                            - ns: Namespace for the annotation
                            - replace: If "true", try to replace existing annotation with same ns

    @return:            A simple html page with a success message
    """

    conn = kwargs['conn']

    idList = request.REQUEST.get('imageIds', None)    # comma - delimited list
    if idList:
        imageIds = [long(i) for i in idList.split(",")]
    else: imageIds = []

    comment = request.REQUEST.get('comment', None)
    ns = request.REQUEST.get('ns', None)
    replace = request.REQUEST.get('replace', False) in ('true', 'True')

    updateService = conn.getUpdateService()
    ann = omero.model.CommentAnnotationI()
    ann.setTextValue(rstring( str(comment) ))
    if ns != None:
        ann.setNs(rstring( str(ns) ))
    ann = updateService.saveAndReturnObject(ann)
    annId = ann.getId().getValue()

    images = []
    for iId in imageIds:
        image = conn.getObject("Image", iId)
        if image == None: continue
        if replace and ns != None:
            oldComment = image.getAnnotation(ns)
            if oldComment != None:
                oldComment.setTextValue(rstring( str(comment) ))
                updateService.saveObject(oldComment)
                continue
        l = omero.model.ImageAnnotationLinkI()
        parent = omero.model.ImageI(iId, False)     # use unloaded object to avoid update conflicts
        l.setParent(parent)
        l.setChild(ann)
        updateService.saveObject(l)
        images.append(image)

    return render_to_response('webtest/util/add_annotations.html', {'images': images, 'comment': comment})
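
# Illustrative request for the view above (IDs, comment text and namespace are
# hypothetical):
#
#   ?imageIds=101,102&comment=Needs%20review&ns=test.example.ns&replace=true
#
# Each image that already carries a comment annotation in the 'test.example.ns'
# namespace has that annotation's text updated in place; images without one are
# linked to the newly created CommentAnnotationI.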

@isUserConnected
def split_view_figure (request, **kwargs):
    """
    Generates an html page displaying a number of images in a grid with channels split into different columns.
    The page also includes a form for modifying various display parameters and re-submitting
    to regenerate this page.
    If no 'imageIds' parameter (comma-delimited list) is found in the 'request', the page generated is simply
    a form requesting image IDs.
    If there are imageIds, the first ID (image) is used to generate the form based on channels of that image.

    @param request:     The django L{http request <django.core.handlers.wsgi.WSGIRequest>}

    @return:            The http response - html page displaying split view figure.
    """

    conn = kwargs['conn']

    query_string = request.META["QUERY_STRING"]

    idList = request.REQUEST.get('imageIds', None)    # comma - delimited list
    if idList:
        imageIds = [long(i) for i in idList.split(",")]
    else:
        imageIds = []

    split_grey = request.REQUEST.get('split_grey', None)
    merged_names = request.REQUEST.get('merged_names', None)
    proj = request.REQUEST.get('proj', "normal")    # intmean, intmax, normal
    try:
        w = request.REQUEST.get('width', 0)
        width = int(w)
    except:
        width = 0
    try:
        h = request.REQUEST.get('height', 0)
        height = int(h)
    except:
        height = 0

    # returns a list of channel info from the image, overridden if values in request
    def getChannelData(image):
        channels = []
        for i, c in enumerate(image.getChannels()):
            name = request.REQUEST.get('cName%s' % i, c.getLogicalChannel().getName())
            # if we have channel info from a form, we know that checkbox:None is unchecked (not absent)
            if request.REQUEST.get('cName%s' % i, None):
                active = (None != request.REQUEST.get('cActive%s' % i, None))
                merged = (None != request.REQUEST.get('cMerged%s' % i, None))
            else:
                active = True
                merged = True
            colour = c.getColor().getHtml()
            start = request.REQUEST.get('cStart%s' % i, c.getWindowStart())
            end = request.REQUEST.get('cEnd%s' % i, c.getWindowEnd())
            render_all = (None != request.REQUEST.get('cRenderAll%s' % i, None))
            channels.append({"name": name, "index": i, "active": active, "merged": merged, "colour": colour,
                "start": start, "end": end, "render_all": render_all})
        return channels

    channels = None
    images = []
    for iId in imageIds:
        image = conn.getObject("Image", iId)
        if image == None: continue
        default_z = image.getSizeZ()/2    # image.getZ() returns 0 - should return default Z?
        # need z for render_image even if we're projecting
        images.append({"id": iId, "z": default_z, "name": image.getName()})
        if channels == None:
            channels = getChannelData(image)
        if height == 0:
            height = image.getSizeY()
        if width == 0:
            width = image.getSizeX()

    size = {"height": height, "width": width}
    c_strs = []
    if channels:    # channels will be None when page first loads (no images)
        indexes = range(1, len(channels)+1)
        c_string = ",".join(["-%s" % str(c) for c in indexes])    # E.g. -1,-2,-3,-4
        mergedFlags = []
        for i, c in enumerate(channels):
            if c["render_all"]:
                levels = "%s:%s" % (c["start"], c["end"])
            else:
                levels = ""
            if c["active"]:
                onFlag = str(i+1) + "|"
                onFlag += levels
                if split_grey: onFlag += "$FFFFFF"    # E.g. 1|100:505$0000FF
                c_strs.append( c_string.replace("-%s" % str(i+1), onFlag) )    # E.g. 1,-2,-3 or 1|$FFFFFF,-2,-3
            if c["merged"]:
                mergedFlags.append("%s|%s" % (i+1, levels))    # E.g. '1|200:4000'
            else:
                mergedFlags.append("-%s" % (i+1))    # E.g. '-1'
        # turn merged channels on in the last image
        c_strs.append( ",".join(mergedFlags) )

    return render_to_response('webtest/demo_viewers/split_view_figure.html', {'images': images, 'c_strs': c_strs,
        'imageIds': idList, 'channels': channels, 'split_grey': split_grey, 'merged_names': merged_names,
        'proj': proj, 'size': size, 'query_string': query_string})
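
# Worked example of the c_strs construction above (channel states and levels are
# hypothetical): for a 3-channel image where channel 1 is active and merged with
# levels 200:4000, channel 2 is merged only, channel 3 is off, and split_grey is
# unset:
#
#   c_string == "-1,-2,-3"
#   c_strs   == ["1|200:4000,-2,-3",      # column showing channel 1 alone
#                "1|200:4000,2|,-3"]      # final column: the merged image
#
# These strings are presumably consumed by the template as the channel ('c')
# parameter of webgateway render_image urls, one column per entry (the template
# itself is not shown here).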

@isUserConnected
def dataset_split_view (request, datasetId, **kwargs):
    """
    Generates a web page that displays a dataset in two panels, with the option to choose different
    rendering settings (channels on/off) for each panel. It uses the render_image url for each
    image, generating the full sized image which is scaled down to view.

    The page also includes a form for editing the channel settings and display size of images.
    This form resubmits to this page and displays the page again with updated parameters.

    @param request:     The django L{http request <django.core.handlers.wsgi.WSGIRequest>}
    @param datasetId:   The ID of the dataset.
    @type datasetId:    Number.

    @return:            The http response - html page displaying split view figure.
    """

    conn = kwargs['conn']

    dataset = conn.getObject("Dataset", datasetId)

    try:
        w = request.REQUEST.get('width', 100)
        width = int(w)
    except:
        width = 100
    try:
        h = request.REQUEST.get('height', 100)
        height = int(h)
    except:
        height = 100

    # returns a list of channel info from the image, overridden if values in request
    def getChannelData(image):
        channels = []
        for i, c in enumerate(image.getChannels()):
            name = c.getLogicalChannel().getName()
            # if we have channel info from a form, we know that checkbox:None is unchecked (not absent)
            if request.REQUEST.get('cStart%s' % i, None):
                active_left = (None != request.REQUEST.get('cActiveLeft%s' % i, None))
                active_right = (None != request.REQUEST.get('cActiveRight%s' % i, None))
            else:
                active_left = True
                active_right = True
            colour = c.getColor().getHtml()
            start = request.REQUEST.get('cStart%s' % i, c.getWindowStart())
            end = request.REQUEST.get('cEnd%s' % i, c.getWindowEnd())
            render_all = (None != request.REQUEST.get('cRenderAll%s' % i, None))
            channels.append({"name": name, "index": i, "active_left": active_left, "active_right": active_right,
                "colour": colour, "start": start, "end": end, "render_all": render_all})
        return channels

    images = []
    channels = None

    for image in dataset.listChildren():
        if channels == None:
            channels = getChannelData(image)
        default_z = image.getSizeZ()/2    # image.getZ() returns 0 - should return default Z?
        # need z for render_image even if we're projecting
        images.append({"id": image.getId(), "z": default_z, "name": image.getName()})

    size = {'width': width, 'height': height}

    indexes = range(1, len(channels)+1)
    c_string = ",".join(["-%s" % str(c) for c in indexes])    # E.g. -1,-2,-3,-4

    leftFlags = []
    rightFlags = []
    for i, c in enumerate(channels):
        if c["render_all"]:
            levels = "%s:%s" % (c["start"], c["end"])
        else:
            levels = ""
        if c["active_left"]:
            leftFlags.append("%s|%s" % (i+1, levels))    # E.g. '1|200:4000'
        else:
            leftFlags.append("-%s" % (i+1))    # E.g. '-1'
        if c["active_right"]:
            rightFlags.append("%s|%s" % (i+1, levels))    # E.g. '1|200:4000'
        else:
            rightFlags.append("-%s" % (i+1))    # E.g. '-1'

    c_left = ",".join(leftFlags)
    c_right = ",".join(rightFlags)

    return render_to_response('webtest/demo_viewers/dataset_split_view.html', {'dataset': dataset, 'images': images,
        'channels': channels, 'size': size, 'c_left': c_left, 'c_right': c_right})
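
# Brief example of the left/right channel strings built above (levels omitted,
# i.e. render_all unchecked; values hypothetical): with two channels where only
# channel 1 is shown in the left panel and only channel 2 in the right panel:
#
#   c_left  == "1|,-2"
#   c_right == "-1,2|"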

@isUserConnected
def image_dimensions (request, imageId, **kwargs):
    """
    Prepare data to display various dimensions of a multi-dim image as axes of a grid of image planes.
    E.g. x-axis = Time, y-axis = Channel.
    If the image has spim data, then combine images with different SPIM angles to provide an additional
    dimension. Also get the SPIM data from various XML annotations and display on page.
    """

    conn = kwargs['conn']

    image = conn.getObject("Image", imageId)
    if image is None:
        return render_to_response('webtest/demo_viewers/image_dimensions.html', {})

    mode = request.REQUEST.get('mode', None) and 'g' or 'c'
    dims = {'Z': image.getSizeZ(), 'C': image.getSizeC(), 'T': image.getSizeT()}

    default_yDim = 'Z'

    spim_data = getSpimData(conn, image)
    if spim_data is not None:
        dims['Angle'] = len(spim_data['images'])
        default_yDim = 'Angle'

    xDim = request.REQUEST.get('xDim', 'T')
    if xDim not in dims.keys():
        xDim = 'T'

    yDim = request.REQUEST.get('yDim', default_yDim)
    if yDim not in dims.keys():
        yDim = 'Z'

    xFrames = int(request.REQUEST.get('xFrames', 5))
    xSize = dims[xDim]
    yFrames = int(request.REQUEST.get('yFrames', 5))
    ySize = dims[yDim]

    xFrames = min(xFrames, xSize)
    yFrames = min(yFrames, ySize)

    xRange = range(xFrames)
    yRange = range(yFrames)

    # build a 2D grid of (imageId, theZ, theC, theT) tuples
    grid = []
    for y in yRange:
        grid.append([])
        for x in xRange:
            iid, theZ, theC, theT = image.id, 0, None, 0
            if xDim == 'Z':
                theZ = x
            if xDim == 'C':
                theC = x
            if xDim == 'T':
                theT = x
            if xDim == 'Angle':
                iid = spim_data['images'][x].id
            if yDim == 'Z':
                theZ = y
            if yDim == 'C':
                theC = y
            if yDim == 'T':
                theT = y
            if yDim == 'Angle':
                iid = spim_data['images'][y].id

            grid[y].append( (iid, theZ, theC is not None and theC+1 or None, theT) )

    size = {"height": 125, "width": 125}

    return render_to_response('webtest/demo_viewers/image_dimensions.html', {'image': image, 'spim_data': spim_data,
        'grid': grid, "size": size, "mode": mode, 'xDim': xDim, 'xRange': xRange, 'yRange': yRange, 'yDim': yDim,
        'xFrames': xFrames, 'yFrames': yFrames})
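
# Illustrative layout produced above (image id is hypothetical): for
# image.id == 1 with xDim == 'T', yDim == 'Z', xFrames == 3 and yFrames == 2,
# grid becomes
#
#   [[(1, 0, None, 0), (1, 0, None, 1), (1, 0, None, 2)],
#    [(1, 1, None, 0), (1, 1, None, 1), (1, 1, None, 2)]]
#
# i.e. each tuple is (imageId, theZ, theC or None, theT), and channel indices
# are shifted to be 1-based whenever a channel axis is in use.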

@isUserConnected
def image_viewer (request, iid, **kwargs):
    """ This view is responsible for showing pixel data as images """

    conn = kwargs['conn']

    kwargs['viewport_server'] = '/webclient'

    return webgateway_views.full_viewer(request, iid, _conn=conn, **kwargs)