Package omero :: Module tables
[hide private]
[frames] | [no frames]

Source Code for Module omero.tables

  1  #!/usr/bin/env python 
  2  # 
  3  # OMERO Tables Interface 
  4  # Copyright 2009 Glencoe Software, Inc.  All Rights Reserved. 
  5  # Use is subject to license terms supplied in LICENSE.txt 
  6  # 
  7   
  8  import os 
  9  import Ice 
 10  import time 
 11  import numpy 
 12  import signal 
 13  import logging 
 14  import threading 
 15  import traceback 
 16  import subprocess 
 17  import exceptions 
 18  import portalocker # Third-party 
 19   
 20  from path import path 
 21   
 22   
 23  import omero # Do we need both?? 
 24  import omero.clients 
 25  import omero.callbacks 
 26   
 27  # For ease of use 
 28  from omero.columns import * 
 29  from omero.rtypes import * 
 30  from omero.util.decorators import remoted, locked, perf 
 31  from omero_ext.functional import wraps 
 32   
 33   
 34  sys = __import__("sys") # Python sys 
 35  tables = __import__("tables") # Pytables 
def slen(rv):
    """
    Return len(rv), or None when rv itself is None.
    """
    if rv is not None:
        return len(rv)
    return None
45
def stamped(func, update = False):
    """
    Decorator which takes the first argument after "self" and compares
    that to the last modification time. If the stamp is older, then the
    method call will throw an omero.OptimisticLockException. Otherwise,
    execution will complete normally. If update is True, then the
    last modification time will be updated after the method call if it
    is successful.

    Note: stamped implies locked
    """
    def check_and_update_stamp(*args, **kwargs):
        self = args[0]
        stamp = args[1]
        if stamp < self._stamp:
            raise omero.OptimisticLockException(None, None, "Resource modified by another thread")

        try:
            return func(*args, **kwargs)
        finally:
            # Refresh the stamp even if func raised, mirroring a
            # successful write attempt's visibility to other callers.
            if update:
                self._stamp = time.time()
    # Bug fix: previously the wraps()-wrapped function was assigned to a
    # misspelled name ("checked_and_update_stamp") and discarded, and the
    # unwrapped closure was handed to locked(), losing func's
    # name/docstring on the servant method.
    return locked(wraps(func)(check_and_update_stamp))
72 73 -class HdfList(object):
74 """ 75 Since two calls to tables.openFile() return non-equal files 76 with equal fileno's, portalocker cannot be used to prevent 77 the creation of two HdfStorage instances from the same 78 Python process. 79 """ 80
81 - def __init__(self):
82 self.logger = logging.getLogger("omero.tables.HdfList") 83 self._lock = threading.RLock() 84 self.__filenos = {} 85 self.__paths = {} 86 self.__locks = {}
87 88 @locked
89 - def addOrThrow(self, hdfpath, hdfstorage):
90 91 if hdfpath in self.__locks: 92 raise omero.LockTimeout(None, None, "Path already in HdfList: %s" % hdfpath) 93 94 parent = path(hdfpath).parent 95 if not parent.exists(): 96 raise omero.ApiUsageException(None, None, "Parent directory does not exist: %s" % parent) 97 98 try: 99 lock = open(hdfpath, "a+") 100 portalocker.lock(lock, portalocker.LOCK_NB|portalocker.LOCK_EX) 101 self.__locks[hdfpath] = lock 102 except portalocker.LockException, le: 103 lock.close() 104 raise omero.LockTimeout(None, None, "Cannot acquire exclusive lock on: %s" % self.__hdf_path, 0) 105 106 hdffile = hdfstorage.openfile("a") 107 fileno = hdffile.fileno() 108 if fileno in self.__filenos.keys(): 109 hdffile.close() 110 raise omero.LockTimeout(None, None, "File already opened by process: %s" % hdfpath, 0) 111 else: 112 self.__filenos[fileno] = hdfstorage 113 self.__paths[hdfpath] = hdfstorage 114 115 return hdffile
116 117 @locked
118 - def getOrCreate(self, hdfpath):
119 try: 120 return self.__paths[hdfpath] 121 except KeyError: 122 return HdfStorage(hdfpath) # Adds itself.
123 124 @locked
125 - def remove(self, hdfpath, hdffile):
126 del self.__filenos[hdffile.fileno()] 127 del self.__paths[hdfpath] 128 try: 129 if hdfpath in self.__locks: 130 try: 131 lock = self.__locks[hdfpath] 132 lock.close() 133 finally: 134 del self.__locks[hdfpath] 135 except exceptions.Exception, e: 136 self.logger.warn("Exception on remove(%s)" % hdfpath, exc_info=True)
# Global object for maintaining files: process-wide singleton mapping
# HDF paths and filenos to their unique HdfStorage instance.
HDFLIST = HdfList()
class HdfStorage(object):
    """
    Provides HDF-storage for measurement results. At most a single
    instance will be available for any given physical HDF5 file.
    Instances register themselves with the module-level HDFLIST on
    construction and are shared by TableI servants via incr()/decr().
    """
    def __init__(self, file_path):

        """
        file_path should be the path to a file in a valid directory where
        this HDF instance can be stored (Not None or Empty). Once this
        method is finished, self.__hdf_file is guaranteed to be a PyTables HDF
        file, but not necessarily initialized.
        """

        if file_path is None or str(file_path) == "":
            raise omero.ValidationException(None, None, "Invalid file_path")

        self.logger = logging.getLogger("omero.tables.HdfStorage")

        self.__hdf_path = path(file_path)
        # Locking first as described at:
        # http://www.pytables.org/trac/ticket/185
        # addOrThrow acquires the file lock, registers self, and returns
        # the opened PyTables file handle.
        self.__hdf_file = HDFLIST.addOrThrow(file_path, self)
        self.__tables = []  # TableI servants attached via incr()/decr()

        self._lock = threading.RLock()  # used by the @locked decorator
        self._stamp = time.time()       # read/updated by @stamped methods

        # These are what we'd like to have
        self.__mea = None
        self.__ome = None

        try:
            self.__ome = self.__hdf_file.root.OME
            self.__mea = self.__ome.Measurements
            self.__types = self.__ome.ColumnTypes[:]
            self.__descriptions = self.__ome.ColumnDescriptions[:]
            self.__initialized = True
        except tables.NoSuchNodeError:
            # Brand-new file: nodes do not exist yet; initialize()
            # must be called before most other methods succeed.
            self.__initialized = False
183 184 # 185 # Non-locked methods 186 # 187
188 - def size(self):
189 return self.__hdf_path.size
190
191 - def openfile(self, mode):
192 try: 193 if self.__hdf_path.exists() and self.__hdf_path.size == 0: 194 mode = "w" 195 return tables.openFile(self.__hdf_path, mode=mode,\ 196 title="OMERO HDF Measurement Storage", rootUEP="/") 197 except (tables.HDF5ExtError, IOError), io: 198 msg = "HDFStorage initialized with bad path: %s" % self.__hdf_path 199 self.logger.error(msg) 200 raise omero.ValidationException(None, None, msg)
201
202 - def __initcheck(self):
203 if not self.__initialized: 204 raise omero.ApiUsageException(None, None, "Not yet initialized")
205
206 - def __width(self):
207 return len(self.__types)
208
209 - def __length(self):
210 return self.__mea.nrows
211
212 - def __sizecheck(self, colNumbers, rowNumbers):
213 if colNumbers is not None: 214 if len(colNumbers) > 0: 215 maxcol = max(colNumbers) 216 totcol = self.__width() 217 if maxcol >= totcol: 218 raise omero.ApiUsageException(None, None, "Column overflow: %s >= %s" % (maxcol, totcol)) 219 else: 220 raise omero.ApiUsageException(None, None, "Columns not specified: %s" % colNumbers) 221 222 223 if rowNumbers is not None: 224 if len(rowNumbers) > 0: 225 maxrow = max(rowNumbers) 226 totrow = self.__length() 227 if maxrow >= totrow: 228 raise omero.ApiUsageException(None, None, "Row overflow: %s >= %s" % (maxrow, totrow)) 229 else: 230 raise omero.ApiUsageException(None, None, "Rows not specified: %s" % rowNumbers)
231 232 # 233 # Locked methods 234 # 235 236 @locked
237 - def initialize(self, cols, metadata = {}):
238 """ 239 240 """ 241 242 if self.__initialized: 243 raise omero.ValidationException(None, None, "Already initialized.") 244 245 if not cols: 246 raise omero.ApiUsageException(None, None, "No columns provided") 247 248 for c in cols: 249 if not c.name: 250 raise omero.ApiUsageException(None, None, "Column unnamed: %s" % c) 251 252 self.__definition = columns2definition(cols) 253 self.__ome = self.__hdf_file.createGroup("/", "OME") 254 self.__mea = self.__hdf_file.createTable(self.__ome, "Measurements", self.__definition) 255 256 self.__types = [ x.ice_staticId() for x in cols ] 257 self.__descriptions = [ (x.description != None) and x.description or "" for x in cols ] 258 self.__hdf_file.createArray(self.__ome, "ColumnTypes", self.__types) 259 self.__hdf_file.createArray(self.__ome, "ColumnDescriptions", self.__descriptions) 260 261 self.__mea.attrs.version = "v1" 262 self.__mea.attrs.initialized = time.time() 263 if metadata: 264 for k, v in metadata.items(): 265 self.__mea.attrs[k] = v 266 # See attrs._f_list("user") to retrieve these. 267 268 self.__mea.flush() 269 self.__hdf_file.flush() 270 self.__initialized = True
271 272 @locked
273 - def incr(self, table):
274 sz = len(self.__tables) 275 self.logger.info("Size: %s - Attaching %s to %s" % (sz, table, self.__hdf_path)) 276 if table in self.__tables: 277 self.logger.warn("Already added") 278 raise omero.ApiUsageException(None, Non, "Already added") 279 self.__tables.append(table) 280 return sz + 1
281 282 @locked
283 - def decr(self, table):
284 sz = len(self.__tables) 285 self.logger.info("Size: %s - Detaching %s from %s", sz, table, self.__hdf_path) 286 if not (table in self.__tables): 287 self.logger.warn("Unknown table") 288 raise omero.ApiUsageException(None, None, "Unknown table") 289 self.__tables.remove(table) 290 if sz <= 1: 291 self.cleanup() 292 return sz - 1
293 294 @locked
295 - def uptodate(self, stamp):
296 return self._stamp <= stamp
297 298 @locked
299 - def rows(self):
300 self.__initcheck() 301 return self.__mea.nrows
302 303 @locked
304 - def cols(self, size, current):
305 self.__initcheck() 306 ic = current.adapter.getCommunicator() 307 types = self.__types 308 names = self.__mea.colnames 309 cols = [] 310 for i in range(len(types)): 311 t = types[i] 312 n = names[i] 313 try: 314 col = ic.findObjectFactory(t).create(t) 315 col.name = n 316 col.setsize(size) 317 col.settable(self.__mea) 318 cols.append(col) 319 except: 320 msg = traceback.format_exc() 321 raise omero.ValidationException(None, msg, "BAD COLUMN TYPE: %s for %s" % (t,n)) 322 return cols
323 324 @locked
325 - def get_meta_map(self):
326 self.__initcheck() 327 metadata = {} 328 attr = self.__mea.attrs 329 keys = list(self.__mea.attrs._v_attrnamesuser) 330 for key in keys: 331 val = attr[key] 332 if type(val) == numpy.float64: 333 val = rfloat(val) 334 elif type(val) == numpy.int32: 335 val = rint(val) 336 elif type(val) == numpy.int64: 337 val = rlong(val) 338 elif type(val) == numpy.string_: 339 val = rstring(val) 340 else: 341 raise omero.ValidationException("BAD TYPE: %s" % type(val)) 342 metadata[key] = val 343 return metadata
344 345 @locked
346 - def add_meta_map(self, m):
347 if not m: 348 return 349 self.__initcheck() 350 attr = self.__mea.attrs 351 for k, v in m.items(): 352 attr[k] = unwrap(v) 353 self.__mea.flush()
354 355 @locked
356 - def append(self, cols):
357 # Optimize! 358 arrays = [] 359 names = [] 360 sz = None 361 for col in cols: 362 if sz is None: 363 sz = col.getsize() 364 else: 365 if sz != col.getsize(): 366 raise omero.ValidationException("Columns are of differing length") 367 names.extend(col.names()) 368 arrays.extend(col.arrays()) 369 col.append(self.__mea) # Potential corruption !!! 370 records = numpy.rec.fromarrays(arrays, names=names) 371 self.__mea.append(records) 372 self.__mea.flush()
373 374 # 375 # Stamped methods 376 # 377 378 @stamped
    def update(self, stamp, data):
        """
        Writes the values from data.columns back into the Measurements
        table at the rows listed in data.rowNumbers, then flushes.

        stamp is consumed by the @stamped decorator (optimistic-lock
        check plus self._stamp refresh); it is unused in the body.
        """
        if data:
            for rn in data.rowNumbers:
                for col in data.columns:
                    # NOTE(review): col.values is indexed by the row
                    # number itself (values[rn]), which assumes each
                    # column carries a value array as long as the whole
                    # table rather than len(rowNumbers) entries --
                    # confirm against the omero.grid.Data contract.
                    getattr(self.__mea.cols, col.name)[rn] = col.values[rn]
            self.__mea.flush()
385 386 @stamped
387 - def getWhereList(self, stamp, condition, variables, unused, start, stop, step):
388 self.__initcheck() 389 try: 390 return self.__mea.getWhereList(condition, variables, None, start, stop, step).tolist() 391 except (exceptions.NameError, exceptions.SyntaxError, exceptions.TypeError, exceptions.ValueError), err: 392 aue = omero.ApiUsageException() 393 aue.message = "Bad condition: %s, %s" % (condition, variables) 394 aue.serverStackTrace = "".join(traceback.format_exc()) 395 aue.serverExceptionClass = str(err.__class__.__name__) 396 raise aue
397
398 - def _as_data(self, cols, rowNumbers):
399 """ 400 Constructs a omero.grid.Data object for returning to the client. 401 """ 402 data = omero.grid.Data() 403 data.columns = cols 404 data.rowNumbers = rowNumbers 405 data.lastModification = long(self._stamp*1000) # Convert to millis since epoch 406 return data
407 408 @stamped
409 - def readCoordinates(self, stamp, rowNumbers, current):
410 self.__initcheck() 411 self.__sizecheck(None, rowNumbers) 412 cols = self.cols(None, current) 413 for col in cols: 414 col.readCoordinates(self.__mea, rowNumbers) 415 return self._as_data(cols, rowNumbers)
416 417 @stamped
418 - def read(self, stamp, colNumbers, start, stop, current):
419 self.__initcheck() 420 self.__sizecheck(colNumbers, None) 421 cols = self.cols(None, current) 422 rv = [] 423 l = 0 424 for i in colNumbers: 425 col = cols[i] 426 col.read(self.__mea, start, stop) 427 rv.append(col) 428 l = len(col.values) 429 return self._as_data(rv, range(start, start+l))
430 431 @stamped
432 - def slice(self, stamp, colNumbers, rowNumbers, current):
433 self.__initcheck() 434 435 if colNumbers is None or len(colNumbers) == 0: 436 colNumbers = range(self.__width()) 437 if rowNumbers is None or len(rowNumbers) == 0: 438 rowNumbers = range(self.__length()) 439 440 self.__sizecheck(colNumbers, rowNumbers) 441 cols = self.cols(None, current) 442 rv = [] 443 for i in colNumbers: 444 col = cols[i] 445 col.readCoordinates(self.__mea, rowNumbers) 446 rv.append(col) 447 return self._as_data(rv, rowNumbers)
448 449 # 450 # Lifecycle methods 451 # 452
    def check(self):
        """
        Resources-API liveness hook. This storage always reports
        alive; its lifetime is instead driven by decr() -> cleanup().
        """
        return True
455 456 @locked
    def cleanup(self):
        """
        Flushes and releases all HDF resources and unregisters this
        storage from the module-level HDFLIST. Safe to call when
        already cleaned up (all handles None).
        """
        self.logger.info("Cleaning storage: %s", self.__hdf_path)
        if self.__mea:
            self.__mea.flush()
            self.__mea = None
        if self.__ome:
            self.__ome = None
        if self.__hdf_file:
            # Unregister before closing: HDFLIST.remove needs the
            # still-open file's fileno() to find the registry entry.
            HDFLIST.remove(self.__hdf_path, self.__hdf_file)
            hdffile = self.__hdf_file
            self.__hdf_file = None
            hdffile.close() # Resources freed
469
# End class HdfStorage


class TableI(omero.grid.Table, omero.util.SimpleServant):
    """
    Spreadsheet implementation based on pytables. One servant per
    client handle; multiple servants may share a single HdfStorage.
    """
478 - def __init__(self, ctx, file_obj, factory, storage, uuid = "unknown"):
479 self.uuid = uuid 480 self.file_obj = file_obj 481 self.factory = factory 482 self.storage = storage 483 self.can_write = factory.getAdminService().canUpdate(file_obj) 484 omero.util.SimpleServant.__init__(self, ctx) 485 486 self.stamp = time.time() 487 self.storage.incr(self)
488
489 - def assert_write(self):
490 """ 491 Checks that the current user can write to the given object 492 at the database level. If not, no FS level writes are permitted 493 either. 494 495 ticket:2910 496 """ 497 if not self.can_write: 498 raise omero.SecurityViolation("Current user cannot write to file %s" % self.file_obj.id.val)
499
500 - def check(self):
501 """ 502 Called periodically to check the resource is alive. Returns 503 False if this resource can be cleaned up. (Resources API) 504 """ 505 self.logger.debug("Checking %s" % self) 506 return True
507
508 - def cleanup(self):
509 """ 510 Decrements the counter on the held storage to allow it to 511 be cleaned up. 512 """ 513 if self.storage: 514 try: 515 self.storage.decr(self) 516 finally: 517 self.storage = None
518
519 - def __str__(self):
520 return "Table-%s" % self.uuid
521 522 @remoted 523 @perf
    def close(self, current = None):
        """
        Releases the storage attachment and, when permitted, pushes the
        final on-disk size back to the server-side OriginalFile via a
        RawFileStore so its sha1/size stay accurate. Cleanup errors are
        logged, never raised.
        """
        size = None
        if self.storage is not None:
            size = self.storage.size() # Size to reset the server object to

        try:
            self.cleanup()
            self.logger.info("Closed %s", self)
        except:
            self.logger.warn("Closed %s with errors", self)

        if self.file_obj is not None and self.can_write:
            fid = self.file_obj.id.val
            if not self.file_obj.isLoaded() or\
                self.file_obj.getDetails() is None or\
                self.file_obj.details.group is None:
                self.logger.warn("Cannot update file object %s since group is none", fid)
            else:
                gid = self.file_obj.details.group.id.val
                # NOTE(review): the [8:] slice appears to strip a fixed
                # "session-" prefix from the Ice identity category --
                # confirm against the session factory's naming scheme.
                client_uuid = self.factory.ice_getIdentity().category[8:]
                ctx = {"omero.group": str(gid), omero.constants.CLIENTUUID: client_uuid}
                try:
                    rfs = self.factory.createRawFileStore(ctx)
                    try:
                        rfs.setFileId(fid, ctx)
                        if size:
                            rfs.truncate(size, ctx) # May do nothing
                            rfs.write([], size, 0, ctx) # Force an update
                        else:
                            rfs.write([], 0, 0, ctx) # No-op
                        file_obj = rfs.save(ctx)
                    finally:
                        rfs.close(ctx)
                    self.logger.info("Updated file object %s to sha1=%s (%s bytes)",\
                        self.file_obj.id.val, file_obj.sha1.val, file_obj.size.val)
                except:
                    self.logger.warn("Failed to update file object %s", self.file_obj.id.val, exc_info=1)
562 563 # TABLES READ API ============================ 564 565 @remoted 566 @perf
567 - def getOriginalFile(self, current = None):
568 msg = "unknown" 569 if self.file_obj: 570 if self.file_obj.id: 571 msg = self.file_obj.id.val 572 self.logger.info("%s.getOriginalFile() => id=%s", self, msg) 573 return self.file_obj
574 575 @remoted 576 @perf
577 - def getHeaders(self, current = None):
578 rv = self.storage.cols(None, current) 579 self.logger.info("%s.getHeaders() => size=%s", self, slen(rv)) 580 return rv
581 582 @remoted 583 @perf
584 - def getNumberOfRows(self, current = None):
585 rv = self.storage.rows() 586 self.logger.info("%s.getNumberOfRows() => %s", self, rv) 587 return long(rv)
588 589 @remoted 590 @perf
591 - def getWhereList(self, condition, variables, start, stop, step, current = None):
592 if stop == 0: 593 stop = None 594 if step == 0: 595 step = None 596 rv = self.storage.getWhereList(self.stamp, condition, variables, None, start, stop, step) 597 self.logger.info("%s.getWhereList(%s, %s, %s, %s, %s) => size=%s", self, condition, variables, start, stop, step, slen(rv)) 598 return rv
599 600 @remoted 601 @perf
602 - def readCoordinates(self, rowNumbers, current = None):
603 self.logger.info("%s.readCoordinates(size=%s)", self, slen(rowNumbers)) 604 try: 605 return self.storage.readCoordinates(self.stamp, rowNumbers, current) 606 except tables.HDF5ExtError, err: 607 aue = omero.ApiUsageException() 608 aue.message = "Error reading coordinates. Most likely out of range" 609 aue.serverStackTrace = "".join(traceback.format_exc()) 610 aue.serverExceptionClass = str(err.__class__.__name__) 611 raise aue
612 613 @remoted 614 @perf
615 - def read(self, colNumbers, start, stop, current = None):
616 self.logger.info("%s.read(%s, %s, %s)", self, colNumbers, start, stop) 617 try: 618 return self.storage.read(self.stamp, colNumbers, start, stop, current) 619 except tables.HDF5ExtError, err: 620 aue = omero.ApiUsageException() 621 aue.message = "Error reading coordinates. Most likely out of range" 622 aue.serverStackTrace = "".join(traceback.format_exc()) 623 aue.serverExceptionClass = str(err.__class__.__name__) 624 raise aue
625 626 @remoted 627 @perf
628 - def slice(self, colNumbers, rowNumbers, current = None):
629 self.logger.info("%s.slice(size=%s, size=%s)", self, slen(colNumbers), slen(rowNumbers)) 630 return self.storage.slice(self.stamp, colNumbers, rowNumbers, current)
631 632 # TABLES WRITE API =========================== 633 634 @remoted 635 @perf
636 - def initialize(self, cols, current = None):
637 self.assert_write() 638 self.storage.initialize(cols) 639 if cols: 640 self.logger.info("Initialized %s with %s col(s)", self, slen(cols))
641 642 @remoted 643 @perf
    def addColumn(self, col, current = None):
        """
        Not yet implemented: always raises omero.ApiUsageException
        after the write-permission check.
        """
        self.assert_write()
        raise omero.ApiUsageException(None, None, "NYI")
647 648 @remoted 649 @perf
650 - def addData(self, cols, current = None):
651 self.assert_write() 652 self.storage.append(cols) 653 sz = 0 654 if cols and cols[0] and cols[0].getsize(): 655 self.logger.info("Added %s row(s) of data to %s", cols[0].getsize(), self)
656 657 @remoted 658 @perf
659 - def update(self, data, current = None):
660 self.assert_write() 661 if data: 662 self.storage.update(self.stamp, data) 663 self.logger.info("Updated %s row(s) of data to %s", slen(data.rowNumbers), self)
664 665 @remoted 666 @perf
    def delete(self, current = None):
        """
        Closes this servant, then queues deletion of the backing
        OriginalFile via the delete service and blocks (10 x 500ms
        polls) for completion. Raises omero.InternalException on
        delete errors or timeout.
        """
        self.assert_write()
        self.close()
        prx = self.factory.getDeleteService()
        dc = omero.api.delete.DeleteCommand("/OriginalFile", self.file_obj.id.val, None)
        handle = prx.queueDelete([dc])
        self.file_obj = None
        # TODO: possible just return handle?
        cb = omero.callbacks.DeleteCallbackI(current.adapter, handle)
        count = 10
        while count:
            count -= 1
            # NOTE(review): rv appears to be an error count (None while
            # still running, >0 on failure) -- confirm against the
            # DeleteCallbackI.block contract.
            rv = cb.block(500)
            if rv is not None:
                report = handle.report()[0]
                if rv > 0:
                    raise omero.InternalException(None, None, report.error)
                else:
                    return
        raise omero.InternalException(None, None, "delete timed-out")
687 688 689 # TABLES METADATA API =========================== 690 691 @remoted 692 @perf
693 - def getMetadata(self, key, current = None):
694 rv = self.storage.get_meta_map() 695 rv = rv.get(key) 696 self.logger.info("%s.getMetadata() => %s", self, unwrap(rv)) 697 return rv
698 699 @remoted 700 @perf
701 - def getAllMetadata(self, current = None):
702 rv = self.storage.get_meta_map() 703 self.logger.info("%s.getMetadata() => size=%s", self, slen(rv)) 704 return rv
705 706 @remoted 707 @perf
708 - def setMetadata(self, key, value, current = None):
709 self.assert_write() 710 self.storage.add_meta_map({key: value}) 711 self.logger.info("%s.setMetadata() => %s=%s", self, key, unwrap(value))
712 713 @remoted 714 @perf
715 - def setAllMetadata(self, value, current = None):
716 self.assert_write() 717 self.storage.add_meta_map({"key": wrap(value)}) 718 self.logger.info("%s.setMetadata() => number=%s", self, slen(value))
719
# Column methods missing


class TablesI(omero.grid.Tables, omero.util.Servant):
    """
    Implementation of the omero.grid.Tables API. Provides
    spreadsheet like functionality across the OMERO.grid.
    This servant serves as a session-less, user-less
    resource for obtaining omero.grid.Table proxies.

    The first major step in initialization is getting
    a session. This will block until the Blitz server
    is reachable.
    """
734 - def __init__(self,\ 735 ctx,\ 736 table_cast = omero.grid.TablePrx.uncheckedCast,\ 737 internal_repo_cast = omero.grid.InternalRepositoryPrx.checkedCast):
738 739 omero.util.Servant.__init__(self, ctx, needs_session = True) 740 741 # Storing these methods, mainly to allow overriding via 742 # test methods. Static methods are evil. 743 self._table_cast = table_cast 744 self._internal_repo_cast = internal_repo_cast 745 746 self.__stores = [] 747 self._get_dir() 748 self._get_uuid() 749 self._get_repo()
750
751 - def _get_dir(self):
752 """ 753 Second step in initialization is to find the .omero/repository 754 directory. If this is not created, then a required server has 755 not started, and so this instance will not start. 756 """ 757 wait = int(self.communicator.getProperties().getPropertyWithDefault("omero.repo.wait", "1")) 758 self.repo_dir = self.communicator.getProperties().getProperty("omero.repo.dir") 759 760 if not self.repo_dir: 761 # Implies this is the legacy directory. Obtain from server 762 self.repo_dir = self.ctx.getSession().getConfigService().getConfigValue("omero.data.dir") 763 764 self.repo_cfg = path(self.repo_dir) / ".omero" / "repository" 765 start = time.time() 766 while not self.repo_cfg.exists() and wait < (time.time() - start): 767 self.logger.info("%s doesn't exist; waiting 5 seconds..." % self.repo_cfg) 768 time.sleep(5) 769 count -= 1 770 if not self.repo_cfg.exists(): 771 msg = "No repository found: %s" % self.repo_cfg 772 self.logger.error(msg) 773 raise omero.ResourceError(None, None, msg)
774
775 - def _get_uuid(self):
776 """ 777 Third step in initialization is to find the database uuid 778 for this grid instance. Multiple OMERO.grids could be watching 779 the same directory. 780 """ 781 cfg = self.ctx.getSession().getConfigService() 782 self.db_uuid = cfg.getDatabaseUuid() 783 self.instance = self.repo_cfg / self.db_uuid
784
    def _get_repo(self):
        """
        Fourth step in initialization is to find the repository object
        for the UUID found in .omero/repository/<db_uuid>, and then
        create a proxy for the InternalRepository attached to that.
        """

        # Get and parse the uuid from the RandomAccessFile format from FileMaker
        self.repo_uuid = (self.instance / "repo_uuid").lines()[0].strip()
        if len(self.repo_uuid) != 38:
            raise omero.ResourceError("Poorly formed UUID: %s" % self.repo_uuid)
        # NOTE(review): 38 chars = 2 bytes of RandomAccessFile framing
        # plus a 36-char UUID; the [2:] slice drops the framing --
        # confirm against the Java writer of repo_uuid.
        self.repo_uuid = self.repo_uuid[2:]

        # Using the repo_uuid, find our OriginalFile object
        self.repo_obj = self.ctx.getSession().getQueryService().findByQuery("select f from OriginalFile f where sha1 = :uuid",
                omero.sys.ParametersI().add("uuid", rstring(self.repo_uuid)))
        self.repo_mgr = self.communicator.stringToProxy("InternalRepository-%s" % self.repo_uuid)
        self.repo_mgr = self._internal_repo_cast(self.repo_mgr)
        self.repo_svc = self.repo_mgr.getProxy()
804 805 @remoted
    def getRepository(self, current = None):
        """
        Returns the Repository object for this Tables server.
        """
        return self.repo_svc
811 812 @remoted 813 @perf
814 - def getTable(self, file_obj, factory, current = None):
815 """ 816 Create and/or register a table servant. 817 """ 818 819 # Will throw an exception if not allowed. 820 file_id = None 821 if file_obj is not None and file_obj.id is not None: 822 file_id = file_obj.id.val 823 self.logger.info("getTable: %s %s", file_id, current.ctx) 824 825 file_path = self.repo_mgr.getFilePath(file_obj) 826 p = path(file_path).dirname() 827 if not p.exists(): 828 p.makedirs() 829 830 storage = HDFLIST.getOrCreate(file_path) 831 id = Ice.Identity() 832 id.name = Ice.generateUUID() 833 table = TableI(self.ctx, file_obj, factory, storage, uuid = id.name) 834 self.resources.add(table) 835 836 prx = current.adapter.add(table, id) 837 return self._table_cast(prx)
838