Browse Source

Publish: Don't clear cached fields on publish.

Lacey Sanderson 6 years ago
parent
commit
ec1b3254d9
2 changed files with 7 additions and 20 deletions
  1. 6 6
      tripal/includes/TripalEntityController.inc
  2. 1 14
      tripal_chado/api/tripal_chado.api.inc

+ 6 - 6
tripal/includes/TripalEntityController.inc

@@ -225,15 +225,13 @@ class TripalEntityController extends EntityAPIController {
        // First delete any previous aliases for this entity.
         // Then save the new one.
 
-        // TODO: publishing an entity can be very slow if there are lots of
+        // @performance: Look into this further.
+        // @spficklin publishing an entity can be very slow if there are lots of
         // entries in the url_alias table, due to this type of
-        // SQL statement that gets called somewhere by Drupal:
+        // SQL statement that gets called in drupal_path_alias_whitelist_rebuild():
         // SELECT DISTINCT SUBSTRING_INDEX(source, '/', 1) AS path FROM url_alias.
         // Perhaps we should write our own SQL to avoid this issue.
-        // This happens in drupal_path_alias_whitelist_rebuild(). It appears we can
-        // get around it by whitelisting our paths before it's called...
         // @lacey: drupal_path_alias_whitelist_rebuild() isn't getting called for me during publish.
-        // @performance
         $values =  array(
           'source' => $source_url,
           'alias' => $alias,
@@ -460,7 +458,9 @@ class TripalEntityController extends EntityAPIController {
 
       // Clear any cache entries for this entity so it can be reloaded using
       // the values that were just saved.
-      if ($cache['clear_cached_fields']) {
+      // Also, we don't need to clear cached fields when publishing because we
+      // didn't attach any (see above).
+      if ($cache['clear_cached_fields'] AND ($invocation != 'entity_publish')) {
         $cid = 'field:TripalEntity:' . $entity->id;
         cache_clear_all($cid, 'cache_field', TRUE);
       }

+ 1 - 14
tripal_chado/api/tripal_chado.api.inc

@@ -39,7 +39,7 @@
  */
 function chado_publish_records($values, $job_id = NULL) {
 
-  // @performance remove after development
+  // Used for adding runtime to the progress report.
   $started_at = microtime(true);
 
   // We want the job object in order to report progress.
@@ -115,7 +115,6 @@ function chado_publish_records($values, $job_id = NULL) {
   $pkey_field = $table_schema['primary key'][0];
 
   // Construct the SQL for identifying which records should be published.
-  // @performance find a way to optimize this?
   $args = array();
   $select = "SELECT T.$pkey_field as record_id ";
   $from = "
@@ -217,9 +216,6 @@ function chado_publish_records($values, $job_id = NULL) {
   $total_published = 0;
   while ($more_records_to_publish) {
 
-    // @performance remove after development:0.43729090690613s
-    // @performance limiting this query DRASTICALLY decreases query execution time: 0.26s
-    // @performance print 'Perform Query :' . (microtime(true) - $started_at) . "s.\n\n";
     $records = chado_query($sql, $args);
 
     // Update the job status every chunk start.
@@ -235,11 +231,6 @@ function chado_publish_records($values, $job_id = NULL) {
         $total_published, $count, $complete * 3, number_format(memory_get_usage()), number_format((microtime(true) - $started_at)/60, 2));
     }
 
-    // @performance evaluate this transaction. Long running transactions can have serious
-    // performance issues in PostgreSQL. One option is to move the transaction within the
-    // loop so that each one is not very long but then we end up with more overhead creating
-    // transactions. A better albeit more complicated approach might be to break the job into
-    // chunks where each one is a single transaction.
     $transaction = db_transaction();
     $cache['transaction'] = $transaction;
 
@@ -265,10 +256,6 @@ function chado_publish_records($values, $job_id = NULL) {
           'bundle_object' => $bundle,
         ));
 
-        // We pass in the transaction and tell save not to clear the field cache for
-        // for performance reasons. We will clear the field cache in bulk below.
-        // @todo clear the field cache in bulk below ;-p
-        $cache['clear_cached_fields'] = FALSE;
         $entity = $entity->save($cache);
         if (!$entity) {
           throw new Exception('Could not create entity.');