
Source Code for Module omero.tables

#!/usr/bin/env python
#
# OMERO Tables Interface
# Copyright 2009 Glencoe Software, Inc.  All Rights Reserved.
# Use is subject to license terms supplied in LICENSE.txt
#

import os
import Ice
import time
import numpy
import signal
import logging
import threading
import traceback
import subprocess
import exceptions
import portalocker # Third-party

from path import path


import omero # Do we need both??
import omero.clients
import omero.callbacks

# For ease of use
from omero.columns import *
from omero.rtypes import *
from omero.util.decorators import remoted, locked, perf
from omero_ext.functional import wraps


sys = __import__("sys") # Python sys
tables = __import__("tables") # Pytables

def slen(rv):
    """
    Returns the length of the argument or None
    if the argument is None
    """
    if rv is None:
        return None
    return len(rv)

def stamped(func, update = False):
    """
    Decorator which takes the first argument after "self" and compares
    that to the last modification time. If the stamp is older, then the
    method call will throw an omero.OptimisticLockException. Otherwise,
    execution will complete normally. If update is True, then the
    last modification time will be updated after the method call if it
    is successful.

    Note: stamped implies locked

    """
    def check_and_update_stamp(*args, **kwargs):
        self = args[0]
        stamp = args[1]
        if stamp < self._stamp:
            raise omero.OptimisticLockException(None, None, "Resource modified by another thread")

        try:
            return func(*args, **kwargs)
        finally:
            if update:
                self._stamp = time.time()
    checked_and_update_stamp = wraps(func)(check_and_update_stamp)
    return locked(check_and_update_stamp)

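# Editorial sketch (not part of the original module): the @stamped methods
# further down always receive the caller's timestamp as the first argument
# after self (TableI passes self.stamp, recorded at construction). The
# decorator rejects the call with omero.OptimisticLockException when that
# stamp is older than the storage's own _stamp. Hypothetical call shape:
#
#     stamp = time.time()
#     storage.update(stamp, data)   # fails if storage._stamp > stamp
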
class HdfList(object):
    """
    Since two calls to tables.openFile() return non-equal files
    with equal fileno's, portalocker cannot be used to prevent
    the creation of two HdfStorage instances from the same
    Python process.
    """

    def __init__(self):
        self.logger = logging.getLogger("omero.tables.HdfList")
        self._lock = threading.RLock()
        self.__filenos = {}
        self.__paths = {}
        self.__locks = {}

    @locked
    def addOrThrow(self, hdfpath, hdfstorage):

        if hdfpath in self.__locks:
            raise omero.LockTimeout(None, None, "Path already in HdfList: %s" % hdfpath)

        parent = path(hdfpath).parent
        if not parent.exists():
            raise omero.ApiUsageException(None, None, "Parent directory does not exist: %s" % parent)

        try:
            lock = open(hdfpath, "a+")
            portalocker.lock(lock, portalocker.LOCK_NB|portalocker.LOCK_EX)
            self.__locks[hdfpath] = lock
        except portalocker.LockException, le:
            lock.close()
            raise omero.LockTimeout(None, None, "Cannot acquire exclusive lock on: %s" % hdfpath, 0)
        except:
            lock.close()
            raise

        hdffile = hdfstorage.openfile("a")
        fileno = hdffile.fileno()
        if fileno in self.__filenos.keys():
            hdffile.close()
            raise omero.LockTimeout(None, None, "File already opened by process: %s" % hdfpath, 0)
        else:
            self.__filenos[fileno] = hdfstorage
            self.__paths[hdfpath] = hdfstorage

        return hdffile

    @locked
    def getOrCreate(self, hdfpath):
        try:
            return self.__paths[hdfpath]
        except KeyError:
            return HdfStorage(hdfpath) # Adds itself.

    @locked
    def remove(self, hdfpath, hdffile):
        del self.__filenos[hdffile.fileno()]
        del self.__paths[hdfpath]
        try:
            if hdfpath in self.__locks:
                try:
                    lock = self.__locks[hdfpath]
                    lock.close()
                finally:
                    del self.__locks[hdfpath]
        except exceptions.Exception, e:
            self.logger.warn("Exception on remove(%s)" % hdfpath, exc_info=True)


# Global object for maintaining files
HDFLIST = HdfList()

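# Editorial sketch (not part of the original module): servants do not
# construct HdfStorage directly; they ask the module-level HDFLIST so that
# a single HdfStorage (and a single exclusive file lock) exists per HDF5
# path in this process. The path below is hypothetical:
#
#     storage = HDFLIST.getOrCreate("/OMERO/Files/123.h5")
#     # ... use storage ...
#     storage.cleanup()   # calls HDFLIST.remove() and closes the file
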
class HdfStorage(object):
    """
    Provides HDF-storage for measurement results. At most a single
    instance will be available for any given physical HDF5 file.
    """

    def __init__(self, file_path):

        """
        file_path should be the path to a file in a valid directory where
        this HDF instance can be stored (Not None or Empty). Once this
        method is finished, self.__hdf_file is guaranteed to be a PyTables HDF
        file, but not necessarily initialized.
        """

        if file_path is None or str(file_path) == "":
            raise omero.ValidationException(None, None, "Invalid file_path")

        self.logger = logging.getLogger("omero.tables.HdfStorage")

        self.__hdf_path = path(file_path)
        # Locking first as described at:
        # http://www.pytables.org/trac/ticket/185
        self.__hdf_file = HDFLIST.addOrThrow(file_path, self)
        self.__tables = []

        self._lock = threading.RLock()
        self._stamp = time.time()

        # These are what we'd like to have
        self.__mea = None
        self.__ome = None

        try:
            self.__ome = self.__hdf_file.root.OME
            self.__mea = self.__ome.Measurements
            self.__types = self.__ome.ColumnTypes[:]
            self.__descriptions = self.__ome.ColumnDescriptions[:]
            self.__initialized = True
        except tables.NoSuchNodeError:
            self.__initialized = False

    #
    # Non-locked methods
    #

    def size(self):
        return self.__hdf_path.size

    def openfile(self, mode):
        try:
            if self.__hdf_path.exists() and self.__hdf_path.size == 0:
                mode = "w"
            return tables.openFile(self.__hdf_path, mode=mode,
                                   title="OMERO HDF Measurement Storage", rootUEP="/")
        except (tables.HDF5ExtError, IOError), io:
            msg = "HDFStorage initialized with bad path: %s" % self.__hdf_path
            self.logger.error(msg)
            raise omero.ValidationException(None, None, msg)

    def __initcheck(self):
        if not self.__initialized:
            raise omero.ApiUsageException(None, None, "Not yet initialized")

    def __width(self):
        return len(self.__types)

    def __length(self):
        return self.__mea.nrows

    def __sizecheck(self, colNumbers, rowNumbers):
        if colNumbers is not None:
            if len(colNumbers) > 0:
                maxcol = max(colNumbers)
                totcol = self.__width()
                if maxcol >= totcol:
                    raise omero.ApiUsageException(None, None, "Column overflow: %s >= %s" % (maxcol, totcol))
            else:
                raise omero.ApiUsageException(None, None, "Columns not specified: %s" % colNumbers)

        if rowNumbers is not None:
            if len(rowNumbers) > 0:
                maxrow = max(rowNumbers)
                totrow = self.__length()
                if maxrow >= totrow:
                    raise omero.ApiUsageException(None, None, "Row overflow: %s >= %s" % (maxrow, totrow))
            else:
                raise omero.ApiUsageException(None, None, "Rows not specified: %s" % rowNumbers)

    #
    # Locked methods
    #

    @locked
    def initialize(self, cols, metadata = None):
        """
        Creates the root OME group and Measurements table for the given
        columns, recording their types and descriptions along with any
        user metadata. May only be called once.
        """
        if metadata is None: metadata = {}

        if self.__initialized:
            raise omero.ValidationException(None, None, "Already initialized.")

        if not cols:
            raise omero.ApiUsageException(None, None, "No columns provided")

        for c in cols:
            if not c.name:
                raise omero.ApiUsageException(None, None, "Column unnamed: %s" % c)

        self.__definition = columns2definition(cols)
        self.__ome = self.__hdf_file.createGroup("/", "OME")
        self.__mea = self.__hdf_file.createTable(self.__ome, "Measurements", self.__definition)

        self.__types = [ x.ice_staticId() for x in cols ]
        self.__descriptions = [ (x.description != None) and x.description or "" for x in cols ]
        self.__hdf_file.createArray(self.__ome, "ColumnTypes", self.__types)
        self.__hdf_file.createArray(self.__ome, "ColumnDescriptions", self.__descriptions)

        self.__mea.attrs.version = "v1"
        self.__mea.attrs.initialized = time.time()
        if metadata:
            for k, v in metadata.items():
                self.__mea.attrs[k] = v
            # See attrs._f_list("user") to retrieve these.

        self.__mea.flush()
        self.__hdf_file.flush()
        self.__initialized = True

    @locked
    def incr(self, table):
        sz = len(self.__tables)
        self.logger.info("Size: %s - Attaching %s to %s" % (sz, table, self.__hdf_path))
        if table in self.__tables:
            self.logger.warn("Already added")
            raise omero.ApiUsageException(None, None, "Already added")
        self.__tables.append(table)
        return sz + 1

    @locked
    def decr(self, table):
        sz = len(self.__tables)
        self.logger.info("Size: %s - Detaching %s from %s", sz, table, self.__hdf_path)
        if not (table in self.__tables):
            self.logger.warn("Unknown table")
            raise omero.ApiUsageException(None, None, "Unknown table")
        self.__tables.remove(table)
        if sz <= 1:
            self.cleanup()
        return sz - 1

    @locked
    def uptodate(self, stamp):
        return self._stamp <= stamp

    @locked
    def rows(self):
        self.__initcheck()
        return self.__mea.nrows

    @locked
    def cols(self, size, current):
        self.__initcheck()
        ic = current.adapter.getCommunicator()
        types = self.__types
        names = self.__mea.colnames
        cols = []
        for i in range(len(types)):
            t = types[i]
            n = names[i]
            try:
                col = ic.findObjectFactory(t).create(t)
                col.name = n
                col.setsize(size)
                col.settable(self.__mea)
                cols.append(col)
            except:
                msg = traceback.format_exc()
                raise omero.ValidationException(None, msg, "BAD COLUMN TYPE: %s for %s" % (t,n))
        return cols

    @locked
    def get_meta_map(self):
        self.__initcheck()
        metadata = {}
        attr = self.__mea.attrs
        keys = list(self.__mea.attrs._v_attrnamesuser)
        for key in keys:
            val = attr[key]
            if type(val) == numpy.float64:
                val = rfloat(val)
            elif type(val) == numpy.int32:
                val = rint(val)
            elif type(val) == numpy.int64:
                val = rlong(val)
            elif type(val) == numpy.string_:
                val = rstring(val)
            else:
                raise omero.ValidationException("BAD TYPE: %s" % type(val))
            metadata[key] = val
        return metadata

    @locked
    def add_meta_map(self, m):
        if not m:
            return
        self.__initcheck()
        attr = self.__mea.attrs
        for k, v in m.items():
            attr[k] = unwrap(v)
        self.__mea.flush()

    @locked
    def append(self, cols):
        # Optimize!
        arrays = []
        names = []
        sz = None
        for col in cols:
            if sz is None:
                sz = col.getsize()
            else:
                if sz != col.getsize():
                    raise omero.ValidationException("Columns are of differing length")
            names.extend(col.names())
            arrays.extend(col.arrays())
            col.append(self.__mea) # Potential corruption !!!
        records = numpy.rec.fromarrays(arrays, names=names)
        self.__mea.append(records)
        self.__mea.flush()

    #
    # Stamped methods
    #

    @stamped
    def update(self, stamp, data):
        if data:
            for i, rn in enumerate(data.rowNumbers):
                for col in data.columns:
                    getattr(self.__mea.cols, col.name)[rn] = col.values[i]
            self.__mea.flush()

    @stamped
    def getWhereList(self, stamp, condition, variables, unused, start, stop, step):
        self.__initcheck()
        try:
            return self.__mea.getWhereList(condition, variables, None, start, stop, step).tolist()
        except (exceptions.NameError, exceptions.SyntaxError, exceptions.TypeError, exceptions.ValueError), err:
            aue = omero.ApiUsageException()
            aue.message = "Bad condition: %s, %s" % (condition, variables)
            aue.serverStackTrace = "".join(traceback.format_exc())
            aue.serverExceptionClass = str(err.__class__.__name__)
            raise aue

    def _as_data(self, cols, rowNumbers):
        """
        Constructs an omero.grid.Data object for returning to the client.
        """
        data = omero.grid.Data()
        data.columns = cols
        data.rowNumbers = rowNumbers
        data.lastModification = long(self._stamp*1000) # Convert to millis since epoch
        return data

    @stamped
    def readCoordinates(self, stamp, rowNumbers, current):
        self.__initcheck()
        self.__sizecheck(None, rowNumbers)
        cols = self.cols(None, current)
        for col in cols:
            col.readCoordinates(self.__mea, rowNumbers)
        return self._as_data(cols, rowNumbers)

    @stamped
    def read(self, stamp, colNumbers, start, stop, current):
        self.__initcheck()
        self.__sizecheck(colNumbers, None)
        cols = self.cols(None, current)
        rv = []
        l = 0
        for i in colNumbers:
            col = cols[i]
            col.read(self.__mea, start, stop)
            rv.append(col)
            l = len(col.values)
        return self._as_data(rv, range(start, start+l))

    @stamped
    def slice(self, stamp, colNumbers, rowNumbers, current):
        self.__initcheck()

        if colNumbers is None or len(colNumbers) == 0:
            colNumbers = range(self.__width())
        if rowNumbers is None or len(rowNumbers) == 0:
            rowNumbers = range(self.__length())

        self.__sizecheck(colNumbers, rowNumbers)
        cols = self.cols(None, current)
        rv = []
        for i in colNumbers:
            col = cols[i]
            col.readCoordinates(self.__mea, rowNumbers)
            rv.append(col)
        return self._as_data(rv, rowNumbers)

    #
    # Lifecycle methods
    #

    def check(self):
        return True

    @locked
    def cleanup(self):
        self.logger.info("Cleaning storage: %s", self.__hdf_path)
        if self.__mea:
            self.__mea.flush()
            self.__mea = None
        if self.__ome:
            self.__ome = None
        if self.__hdf_file:
            HDFLIST.remove(self.__hdf_path, self.__hdf_file)
            hdffile = self.__hdf_file
            self.__hdf_file = None
            hdffile.close() # Resources freed

# End class HdfStorage

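# Editorial sketch (not part of the original module): the intended
# HdfStorage lifecycle as exercised by the servants below. Column classes
# are the *ColumnI implementations pulled in from omero.columns above;
# the constructor signature (name, description, values) and the path are
# assumptions for illustration.
#
#     storage = HDFLIST.getOrCreate("/OMERO/Files/123.h5")
#     storage.initialize([LongColumnI("id", "", []),
#                         DoubleColumnI("area", "", [])])
#     storage.append([LongColumnI("id", "", [1L, 2L]),
#                     DoubleColumnI("area", "", [10.5, 20.25])])
#     print storage.rows()   # -> 2
#     storage.cleanup()      # flush and close; unregisters from HDFLIST
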
class TableI(omero.grid.Table, omero.util.SimpleServant):
    """
    Spreadsheet implementation based on pytables.
    """

    def __init__(self, ctx, file_obj, factory, storage, uuid = "unknown"):
        self.uuid = uuid
        self.file_obj = file_obj
        self.factory = factory
        self.storage = storage
        self.can_write = factory.getAdminService().canUpdate(file_obj)
        omero.util.SimpleServant.__init__(self, ctx)

        self.stamp = time.time()
        self.storage.incr(self)

    def assert_write(self):
        """
        Checks that the current user can write to the given object
        at the database level. If not, no FS level writes are permitted
        either.

        ticket:2910
        """
        if not self.can_write:
            raise omero.SecurityViolation("Current user cannot write to file %s" % self.file_obj.id.val)

    def check(self):
        """
        Called periodically to check the resource is alive. Returns
        False if this resource can be cleaned up. (Resources API)
        """
        self.logger.debug("Checking %s" % self)
        return True

    def cleanup(self):
        """
        Decrements the counter on the held storage to allow it to
        be cleaned up.
        """
        if self.storage:
            try:
                self.storage.decr(self)
            finally:
                self.storage = None

    def __str__(self):
        return "Table-%s" % self.uuid

    @remoted
    @perf
    def close(self, current = None):

        size = None
        if self.storage is not None:
            size = self.storage.size() # Size to reset the server object to

        try:
            self.cleanup()
            self.logger.info("Closed %s", self)
        except:
            self.logger.warn("Closed %s with errors", self)

        if self.file_obj is not None and self.can_write:
            fid = self.file_obj.id.val
            if not self.file_obj.isLoaded() or\
               self.file_obj.getDetails() is None or\
               self.file_obj.details.group is None:
                self.logger.warn("Cannot update file object %s since group is none", fid)
            else:
                gid = self.file_obj.details.group.id.val
                client_uuid = self.factory.ice_getIdentity().category[8:]
                ctx = {"omero.group": str(gid), omero.constants.CLIENTUUID: client_uuid}
                try:
                    rfs = self.factory.createRawFileStore(ctx)
                    try:
                        rfs.setFileId(fid, ctx)
                        if size:
                            rfs.truncate(size, ctx)     # May do nothing
                            rfs.write([], size, 0, ctx) # Force an update
                        else:
                            rfs.write([], 0, 0, ctx)    # No-op
                        file_obj = rfs.save(ctx)
                    finally:
                        rfs.close(ctx)
                    self.logger.info("Updated file object %s to sha1=%s (%s bytes)",
                                     self.file_obj.id.val, file_obj.sha1.val, file_obj.size.val)
                except:
                    self.logger.warn("Failed to update file object %s", self.file_obj.id.val, exc_info=1)

    # TABLES READ API ============================

    @remoted
    @perf
    def getOriginalFile(self, current = None):
        msg = "unknown"
        if self.file_obj:
            if self.file_obj.id:
                msg = self.file_obj.id.val
        self.logger.info("%s.getOriginalFile() => id=%s", self, msg)
        return self.file_obj

    @remoted
    @perf
    def getHeaders(self, current = None):
        rv = self.storage.cols(None, current)
        self.logger.info("%s.getHeaders() => size=%s", self, slen(rv))
        return rv

    @remoted
    @perf
    def getNumberOfRows(self, current = None):
        rv = self.storage.rows()
        self.logger.info("%s.getNumberOfRows() => %s", self, rv)
        return long(rv)

    @remoted
    @perf
    def getWhereList(self, condition, variables, start, stop, step, current = None):
        variables = unwrap(variables)
        if stop == 0:
            stop = None
        if step == 0:
            step = None
        rv = self.storage.getWhereList(self.stamp, condition, variables, None, start, stop, step)
        self.logger.info("%s.getWhereList(%s, %s, %s, %s, %s) => size=%s", self, condition, variables, start, stop, step, slen(rv))
        return rv

    @remoted
    @perf
    def readCoordinates(self, rowNumbers, current = None):
        self.logger.info("%s.readCoordinates(size=%s)", self, slen(rowNumbers))
        try:
            return self.storage.readCoordinates(self.stamp, rowNumbers, current)
        except tables.HDF5ExtError, err:
            aue = omero.ApiUsageException()
            aue.message = "Error reading coordinates. Most likely out of range"
            aue.serverStackTrace = "".join(traceback.format_exc())
            aue.serverExceptionClass = str(err.__class__.__name__)
            raise aue

    @remoted
    @perf
    def read(self, colNumbers, start, stop, current = None):
        self.logger.info("%s.read(%s, %s, %s)", self, colNumbers, start, stop)
        try:
            return self.storage.read(self.stamp, colNumbers, start, stop, current)
        except tables.HDF5ExtError, err:
            aue = omero.ApiUsageException()
            aue.message = "Error reading coordinates. Most likely out of range"
            aue.serverStackTrace = "".join(traceback.format_exc())
            aue.serverExceptionClass = str(err.__class__.__name__)
            raise aue

    @remoted
    @perf
    def slice(self, colNumbers, rowNumbers, current = None):
        self.logger.info("%s.slice(size=%s, size=%s)", self, slen(colNumbers), slen(rowNumbers))
        return self.storage.slice(self.stamp, colNumbers, rowNumbers, current)

641 - def initialize(self, cols, current = None):
642 self.assert_write() 643 self.storage.initialize(cols) 644 if cols: 645 self.logger.info("Initialized %s with %s col(s)", self, slen(cols))
646 647 @remoted 648 @perf
649 - def addColumn(self, col, current = None):
650 self.assert_write() 651 raise omero.ApiUsageException(None, None, "NYI")
652 653 @remoted 654 @perf
655 - def addData(self, cols, current = None):
656 self.assert_write() 657 self.storage.append(cols) 658 sz = 0 659 if cols and cols[0] and cols[0].getsize(): 660 self.logger.info("Added %s row(s) of data to %s", cols[0].getsize(), self)
661 662 @remoted 663 @perf
664 - def update(self, data, current = None):
665 self.assert_write() 666 if data: 667 self.storage.update(self.stamp, data) 668 self.logger.info("Updated %s row(s) of data to %s", slen(data.rowNumbers), self)
669 670 @remoted 671 @perf
672 - def delete(self, current = None):
673 self.assert_write() 674 self.close() 675 prx = self.factory.getDeleteService() 676 dc = omero.api.delete.DeleteCommand("/OriginalFile", self.file_obj.id.val, None) 677 handle = prx.queueDelete([dc]) 678 self.file_obj = None 679 # TODO: possible just return handle? 680 cb = omero.callbacks.DeleteCallbackI(current.adapter, handle) 681 count = 10 682 while count: 683 count -= 1 684 rv = cb.block(500) 685 if rv is not None: 686 report = handle.report()[0] 687 if rv > 0: 688 raise omero.InternalException(None, None, report.error) 689 else: 690 return 691 raise omero.InternalException(None, None, "delete timed-out")
    # TABLES METADATA API ===========================

    @remoted
    @perf
    def getMetadata(self, key, current = None):
        rv = self.storage.get_meta_map()
        rv = rv.get(key)
        self.logger.info("%s.getMetadata() => %s", self, unwrap(rv))
        return rv

    @remoted
    @perf
    def getAllMetadata(self, current = None):
        rv = self.storage.get_meta_map()
        self.logger.info("%s.getMetadata() => size=%s", self, slen(rv))
        return rv

    @remoted
    @perf
    def setMetadata(self, key, value, current = None):
        self.assert_write()
        self.storage.add_meta_map({key: value})
        self.logger.info("%s.setMetadata() => %s=%s", self, key, unwrap(value))

    @remoted
    @perf
    def setAllMetadata(self, value, current = None):
        self.assert_write()
        self.storage.add_meta_map({"key": wrap(value)})
        self.logger.info("%s.setMetadata() => number=%s", self, slen(value))

# Column methods missing

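# Editorial sketch (not part of the original module): clients normally
# reach a TableI servant through omero.api.SharedResources rather than by
# constructing it; the repository id, file name and column layout below
# are hypothetical.
#
#     sr = client.getSession().sharedResources()
#     table = sr.newTable(1, "measurements.h5")   # omero.grid.TablePrx
#     table.initialize([omero.grid.LongColumn("id", "", [])])
#     table.addData([omero.grid.LongColumn("id", "", [1L, 2L, 3L])])
#     print table.getNumberOfRows()               # -> 3
#     data = table.readCoordinates([0, 2])
#     table.close()
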
class TablesI(omero.grid.Tables, omero.util.Servant):
    """
    Implementation of the omero.grid.Tables API. Provides
    spreadsheet like functionality across the OMERO.grid.
    This servant serves as a session-less, user-less
    resource for obtaining omero.grid.Table proxies.

    The first major step in initialization is getting
    a session. This will block until the Blitz server
    is reachable.
    """

    def __init__(self,
                 ctx,
                 table_cast = omero.grid.TablePrx.uncheckedCast,
                 internal_repo_cast = omero.grid.InternalRepositoryPrx.checkedCast):

        omero.util.Servant.__init__(self, ctx, needs_session = True)

        # Storing these methods, mainly to allow overriding via
        # test methods. Static methods are evil.
        self._table_cast = table_cast
        self._internal_repo_cast = internal_repo_cast

        self.__stores = []
        self._get_dir()
        self._get_uuid()
        self._get_repo()

    def _get_dir(self):
        """
        Second step in initialization is to find the .omero/repository
        directory. If this is not created, then a required server has
        not started, and so this instance will not start.
        """
        wait = int(self.communicator.getProperties().getPropertyWithDefault("omero.repo.wait", "1"))
        self.repo_dir = self.communicator.getProperties().getProperty("omero.repo.dir")

        if not self.repo_dir:
            # Implies this is the legacy directory. Obtain from server
            self.repo_dir = self.ctx.getSession().getConfigService().getConfigValue("omero.data.dir")

        self.repo_cfg = path(self.repo_dir) / ".omero" / "repository"
        start = time.time()
        while not self.repo_cfg.exists() and wait < (time.time() - start):
            self.logger.info("%s doesn't exist; waiting 5 seconds..." % self.repo_cfg)
            self.stop_event.wait(5)
        if not self.repo_cfg.exists():
            msg = "No repository found: %s" % self.repo_cfg
            self.logger.error(msg)
            raise omero.ResourceError(None, None, msg)

    def _get_uuid(self):
        """
        Third step in initialization is to find the database uuid
        for this grid instance. Multiple OMERO.grids could be watching
        the same directory.
        """
        cfg = self.ctx.getSession().getConfigService()
        self.db_uuid = cfg.getDatabaseUuid()
        self.instance = self.repo_cfg / self.db_uuid

    def _get_repo(self):
        """
        Fourth step in initialization is to find the repository object
        for the UUID found in .omero/repository/<db_uuid>, and then
        create a proxy for the InternalRepository attached to that.
        """

        # Get and parse the uuid from the RandomAccessFile format from FileMaker
        self.repo_uuid = (self.instance / "repo_uuid").lines()[0].strip()
        if len(self.repo_uuid) != 38:
            raise omero.ResourceError("Poorly formed UUID: %s" % self.repo_uuid)
        self.repo_uuid = self.repo_uuid[2:]

        # Using the repo_uuid, find our OriginalFile object
        self.repo_obj = self.ctx.getSession().getQueryService().findByQuery(
            "select f from OriginalFile f where sha1 = :uuid",
            omero.sys.ParametersI().add("uuid", rstring(self.repo_uuid)))
        self.repo_mgr = self.communicator.stringToProxy("InternalRepository-%s" % self.repo_uuid)
        self.repo_mgr = self._internal_repo_cast(self.repo_mgr)
        self.repo_svc = self.repo_mgr.getProxy()

    @remoted
    def getRepository(self, current = None):
        """
        Returns the Repository object for this Tables server.
        """
        return self.repo_svc

    @remoted
    @perf
    def getTable(self, file_obj, factory, current = None):
        """
        Create and/or register a table servant.
        """

        # Will throw an exception if not allowed.
        file_id = None
        if file_obj is not None and file_obj.id is not None:
            file_id = file_obj.id.val
        self.logger.info("getTable: %s %s", file_id, current.ctx)

        file_path = self.repo_mgr.getFilePath(file_obj)
        p = path(file_path).dirname()
        if not p.exists():
            p.makedirs()

        storage = HDFLIST.getOrCreate(file_path)
        id = Ice.Identity()
        id.name = Ice.generateUUID()
        table = TableI(self.ctx, file_obj, factory, storage, uuid = id.name)
        self.resources.add(table)

        prx = current.adapter.add(table, id)
        return self._table_cast(prx)

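# Editorial sketch (not part of the original module): the *_cast arguments
# to TablesI.__init__ exist so tests can substitute stubs for the real
# grid proxies; "fake_cast" and "ctx" below are hypothetical.
#
#     def fake_cast(prx):
#         return prx   # or return a test double
#
#     servant = TablesI(ctx,
#                       table_cast = fake_cast,
#                       internal_repo_cast = fake_cast)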